From 8b0b0c36d3301990d92f72e763e4d49965ab13d1 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Sat, 6 Jan 2018 13:03:44 -0500 Subject: [PATCH 01/16] Remove out-of-date projectile file This commit removes an out-of-date projectile file from the top-level directory. Relates #28115 --- .projectile | 31 ------------------------------- 1 file changed, 31 deletions(-) delete mode 100644 .projectile diff --git a/.projectile b/.projectile deleted file mode 100644 index 49e2b292c2610..0000000000000 --- a/.projectile +++ /dev/null @@ -1,31 +0,0 @@ --/target --/core/target --/qa/target --/rest-api-spec/target --/test-framework/target --/plugins/target --/plugins/analysis-icu/target --/plugins/analysis-kuromoji/target --/plugins/analysis-phonetic/target --/plugins/analysis-smartcn/target --/plugins/analysis-stempel/target --/plugins/cloud-aws/target --/plugins/cloud-azure/target --/plugins/cloud-gce/target --/plugins/delete-by-query/target --/plugins/discovery-azure/target --/plugins/discovery-ec2/target --/plugins/discovery-gce/target --/plugins/jvm-example/target --/plugins/lang-expression/target --/plugins/lang-groovy/target --/plugins/lang-javascript/target --/plugins/lang-python/target --/plugins/mapper-murmur3/target --/plugins/mapper-size/target --/plugins/repository-azure/target --/plugins/repository-s3/target --/plugins/site-example/target --/plugins/store-smb/target --/plugins/target --*.class From eaa636d4bbe248aa70017d3df40f6c755ecbe8d1 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Sat, 6 Jan 2018 22:44:43 -0500 Subject: [PATCH 02/16] Clarify reproduce info on Windows This commit corrects the test failure reproduction line on Windows. Relates #28104 --- .../org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy | 4 +++- .../test/junit/listeners/ReproduceInfoPrinter.java | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index b3cc096ee2186..82e4ac9b71cd0 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -1,6 +1,7 @@ package org.elasticsearch.gradle.vagrant import com.carrotsearch.gradle.junit4.RandomizedTestingPlugin +import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.FileContentsTask import org.gradle.api.* import org.gradle.api.artifacts.dsl.RepositoryHandler @@ -343,8 +344,9 @@ class VagrantTestPlugin implements Plugin { TaskExecutionAdapter packagingReproListener = new TaskExecutionAdapter() { @Override void afterExecute(Task task, TaskState state) { + final String gradlew = Os.isFamily(Os.FAMILY_WINDOWS) ?
"gradlew" : "./gradlew" if (state.failure != null) { - println "REPRODUCE WITH: ./gradlew ${packaging.path} " + + println "REPRODUCE WITH: ${gradlew} ${packaging.path} " + "-Dtests.seed=${project.testSeed} " } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java index cb2c9ff1d2639..ca16ac6204a90 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java +++ b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java @@ -20,6 +20,7 @@ import com.carrotsearch.randomizedtesting.ReproduceErrorMessageBuilder; import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.Constants; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.ESIntegTestCase; @@ -71,7 +72,8 @@ public void testFailure(Failure failure) throws Exception { return; } - final StringBuilder b = new StringBuilder("REPRODUCE WITH: ./gradlew "); + final String gradlew = Constants.WINDOWS ? "gradlew" : "./gradlew"; + final StringBuilder b = new StringBuilder("REPRODUCE WITH: " + gradlew + " "); String task = System.getProperty("tests.task"); // TODO: enforce (intellij still runs the runner?) or use default "test" but that won't work for integ b.append(task); From 2e08916fa6e1e66402c756b2764d65d63ca53100 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Sun, 7 Jan 2018 13:55:15 -0800 Subject: [PATCH 03/16] Test: Add assumeFalse for test that cannot pass on windows closes #28095 --- .../java/org/elasticsearch/plugins/PluginsServiceTests.java | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/core/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java b/core/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java index 6d01ff14a399c..16c3eb34b0e63 100644 --- a/core/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java +++ b/core/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java @@ -531,6 +531,12 @@ public void testJarHellTransitiveMap() throws Exception { } public void testNonExtensibleDep() throws Exception { + // This test opens a child classloader, reading a jar under the test temp + // dir (a dummy plugin). Classloaders are closed by GC, so when test teardown + // occurs the jar is deleted while the classloader is still open. However, on + // windows, files cannot be deleted when they are still open by a process. 
+ assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS); + Path homeDir = createTempDir(); Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), homeDir).build(); Path pluginsDir = homeDir.resolve("plugins"); From b46bb2efae5e2d00733b8bafa405c1d35bcd9849 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Sun, 7 Jan 2018 23:20:44 +0100 Subject: [PATCH 04/16] test: do not use asn fields Closes #28124 --- .../ingest/geoip/GeoIpProcessorFactoryTests.java | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java index 49bc4f3b9f2f2..58119cc1af983 100644 --- a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java +++ b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java @@ -209,11 +209,15 @@ public void testBuildFields() throws Exception { Set properties = EnumSet.noneOf(GeoIpProcessor.Property.class); List fieldNames = new ArrayList<>(); + + int counter = 0; int numFields = scaledRandomIntBetween(1, GeoIpProcessor.Property.values().length); - for (int i = 0; i < numFields; i++) { - GeoIpProcessor.Property property = GeoIpProcessor.Property.values()[i]; + for (GeoIpProcessor.Property property : GeoIpProcessor.Property.ALL_CITY_PROPERTIES) { properties.add(property); fieldNames.add(property.name().toLowerCase(Locale.ROOT)); + if (++counter >= numFields) { + break; + } } Map config = new HashMap<>(); config.put("field", "_field"); From db186311c65227b721ad49f275d5b26bdf23986f Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Mon, 8 Jan 2018 11:11:41 +1000 Subject: [PATCH 05/16] Use the underlying connection version for CCS connections (#28093) Previously this would default to the version of the remote Node. However, if the remote cluster was mixed-version (e.g. it was part way through a rolling upgrade), then the TransportService may have negotiated a connection version that is not identical to connected Node's version. This mismatch would cause the Stream and the (Remote)Connection to report different version numbers, which could cause data to be sent over the wire using an incorrect serialization version. 
--- .../transport/RemoteClusterConnection.java | 6 ++ .../RemoteClusterConnectionTests.java | 61 +++++++++++++++++++ 2 files changed, 67 insertions(+) diff --git a/core/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/core/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index 55edd0c86ec29..e73debc601430 100644 --- a/core/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/core/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -23,6 +23,7 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoAction; @@ -280,6 +281,11 @@ public void sendRequest(long requestId, String action, TransportRequest request, public void close() throws IOException { assert false: "proxy connections must not be closed"; } + + @Override + public Version getVersion() { + return connection.getVersion(); + } }; } diff --git a/core/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/core/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index e6d278af085b2..e7dccf702fe26 100644 --- a/core/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/core/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -81,7 +81,11 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.iterableWithSize; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.startsWith; public class RemoteClusterConnectionTests extends ESTestCase { @@ -305,6 +309,63 @@ public void testConnectWithIncompatibleTransports() throws Exception { } } + public void testRemoteConnectionVersionMatchesTransportConnectionVersion() throws Exception { + List knownNodes = new CopyOnWriteArrayList<>(); + final Version previousVersion = VersionUtils.getPreviousVersion(); + try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, previousVersion); + MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes, Version.CURRENT)) { + + DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); + assertThat(seedNode, notNullValue()); + knownNodes.add(seedNode); + + DiscoveryNode oldVersionNode = discoverableTransport.getLocalDiscoNode(); + assertThat(oldVersionNode, notNullValue()); + knownNodes.add(oldVersionNode); + + assertThat(seedNode.getVersion(), not(equalTo(oldVersionNode.getVersion()))); + try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + final Transport.Connection seedConnection = new Transport.Connection() { + @Override + public DiscoveryNode getNode() { + return seedNode; + } + + @Override + public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) + throws IOException, TransportException { + // no-op + } + + @Override + public void close() throws IOException { + // no-op + } + }; + service.addDelegate(seedNode.getAddress(), new 
MockTransportService.DelegateTransport(service.getOriginalTransport()) { + @Override + public Connection getConnection(DiscoveryNode node) { + if (node == seedNode) { + return seedConnection; + } + return super.getConnection(node); + } + }); + service.start(); + service.acceptIncomingRequests(); + try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", + Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { + connection.addConnectedNode(seedNode); + for (DiscoveryNode node : knownNodes) { + final Transport.Connection transportConnection = connection.getConnection(node); + assertThat(transportConnection.getVersion(), equalTo(previousVersion)); + } + assertThat(knownNodes, iterableWithSize(2)); + } + } + } + } + @SuppressForbidden(reason = "calls getLocalHost here but it's fine in this case") public void testSlowNodeCanBeCanceled() throws IOException, InterruptedException { try (ServerSocket socket = new MockServerSocket()) { From ae9c3281fcf8c79831e12831cfc99318fdec6828 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Mon, 8 Jan 2018 10:02:56 +0100 Subject: [PATCH 06/16] [TEST] Wait for replicas to be allocated before shrinking The full-cluster-restart tests are run with two nodes. This can lead to situations where the shrink tests fail because the replicas are not allocated yet and the shrink operation needs the source shards to be available on the same node. --- .../java/org/elasticsearch/upgrades/FullClusterRestartIT.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index ec755cda6b8d1..06b8406b078dd 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -388,6 +388,8 @@ public void testShrink() throws IOException { .endObject(); }); + ensureGreen(index); // wait for source index to be available on both nodes before starting shrink + String updateSettingsRequestBody = "{\"settings\": {\"index.blocks.write\": true}}"; Response rsp = client().performRequest("PUT", "/" + index + "/_settings", Collections.emptyMap(), new StringEntity(updateSettingsRequestBody, ContentType.APPLICATION_JSON)); @@ -453,6 +455,8 @@ public void testShrinkAfterUpgrade() throws IOException { .endObject(); }); } else { + ensureGreen(index); // wait for source index to be available on both nodes before starting shrink + String updateSettingsRequestBody = "{\"settings\": {\"index.blocks.write\": true}}"; Response rsp = client().performRequest("PUT", "/" + index + "/_settings", Collections.emptyMap(), new StringEntity(updateSettingsRequestBody, ContentType.APPLICATION_JSON)); From fd45a46ce84e6e3f08015ab0fb49308bf8d287e9 Mon Sep 17 00:00:00 2001 From: olcbean <26058559+olcbean@users.noreply.github.com> Date: Mon, 8 Jan 2018 10:57:45 +0100 Subject: [PATCH 07/16] Deprecate `isShardsAcked()` in favour of `isShardsAcknowledged()` (#27819) Several responses include the shards_acknowledged flag (indicating whether the requisite number of shard copies started before the completion of the operation) and there are two different getters used : isShardsAcknowledged() and isShardsAcked(). 
This PR deprecates the isShardsAcked() in favour of isShardsAcknowledged() in CreateIndexResponse, RolloverResponse and CreateIndexClusterStateUpdateResponse. Closes #27784 --- .../IndicesClientDocumentationIT.java | 5 ++-- .../indices/create/CreateIndexRequest.java | 2 +- .../create/CreateIndexRequestBuilder.java | 2 +- .../indices/create/CreateIndexResponse.java | 30 +++++++++++++------ .../create/TransportCreateIndexAction.java | 2 +- .../indices/rollover/RolloverRequest.java | 2 +- .../rollover/RolloverRequestBuilder.java | 2 +- .../indices/rollover/RolloverResponse.java | 30 +++++++++++++------ .../rollover/TransportRolloverAction.java | 5 ++-- .../admin/indices/shrink/ResizeRequest.java | 2 +- .../indices/shrink/ResizeRequestBuilder.java | 2 +- .../admin/indices/shrink/ResizeResponse.java | 4 +-- .../indices/shrink/TransportResizeAction.java | 4 +-- ...CreateIndexClusterStateUpdateResponse.java | 15 +++++++--- .../metadata/MetaDataCreateIndexService.java | 10 +++---- .../admin/indices/create/CreateIndexIT.java | 6 ++-- .../create/CreateIndexResponseTests.java | 10 +++---- .../indices/open/OpenIndexResponseTests.java | 4 +-- .../support/ActiveShardsObserverIT.java | 6 ++-- .../RandomExceptionCircuitBreakerIT.java | 2 +- .../hamcrest/ElasticsearchAssertions.java | 2 +- 21 files changed, 90 insertions(+), 57 deletions(-) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index 56a241f8d92c8..42d19fab82fe9 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -190,10 +190,10 @@ public void testCreateIndex() throws IOException { // tag::create-index-response boolean acknowledged = createIndexResponse.isAcknowledged(); // <1> - boolean shardsAcked = createIndexResponse.isShardsAcked(); // <2> + boolean shardsAcknowledged = createIndexResponse.isShardsAcknowledged(); // <2> // end::create-index-response assertTrue(acknowledged); - assertTrue(shardsAcked); + assertTrue(shardsAcknowledged); } } @@ -202,7 +202,6 @@ public void testCreateIndexAsync() throws Exception { { CreateIndexRequest request = new CreateIndexRequest("twitter"); - // tag::create-index-execute-async client.indices().createIndexAsync(request, new ActionListener() { @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index f628974834cb5..17941b582ec31 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -453,7 +453,7 @@ public ActiveShardCount waitForActiveShards() { * non-negative integer, up to the number of copies per shard (number of replicas + 1), * to wait for the desired amount of shard copies to become active before returning. * Index creation will only wait up until the timeout value for the number of shard copies - * to be active before returning. Check {@link CreateIndexResponse#isShardsAcked()} to + * to be active before returning. 
Check {@link CreateIndexResponse#isShardsAcknowledged()} to * determine if the requisite shard copies were all started before returning or timing out. * * @param waitForActiveShards number of active shard copies to wait on diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java index d5ad01da645d9..fabe269124e9e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java @@ -254,7 +254,7 @@ public CreateIndexRequestBuilder setUpdateAllTypes(boolean updateAllTypes) { * non-negative integer, up to the number of copies per shard (number of replicas + 1), * to wait for the desired amount of shard copies to become active before returning. * Index creation will only wait up until the timeout value for the number of shard copies - * to be active before returning. Check {@link CreateIndexResponse#isShardsAcked()} to + * to be active before returning. Check {@link CreateIndexResponse#isShardsAcknowledged()} to * determine if the requisite shard copies were all started before returning or timing out. * * @param waitForActiveShards number of active shard copies to wait on diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java index 5c07b4024ee7a..46203d369d9e1 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java @@ -52,16 +52,16 @@ public class CreateIndexResponse extends AcknowledgedResponse implements ToXCont PARSER.declareField(constructorArg(), (parser, context) -> parser.text(), INDEX, ObjectParser.ValueType.STRING); } - private boolean shardsAcked; + private boolean shardsAcknowledged; private String index; protected CreateIndexResponse() { } - protected CreateIndexResponse(boolean acknowledged, boolean shardsAcked, String index) { + protected CreateIndexResponse(boolean acknowledged, boolean shardsAcknowledged, String index) { super(acknowledged); - assert acknowledged || shardsAcked == false; // if its not acknowledged, then shards acked should be false too - this.shardsAcked = shardsAcked; + assert acknowledged || shardsAcknowledged == false; // if its not acknowledged, then shardsAcknowledged should be false too + this.shardsAcknowledged = shardsAcknowledged; this.index = index; } @@ -69,7 +69,7 @@ protected CreateIndexResponse(boolean acknowledged, boolean shardsAcked, String public void readFrom(StreamInput in) throws IOException { super.readFrom(in); readAcknowledged(in); - shardsAcked = in.readBoolean(); + shardsAcknowledged = in.readBoolean(); if (in.getVersion().onOrAfter(Version.V_5_6_0)) { index = in.readString(); } @@ -79,7 +79,7 @@ public void readFrom(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); writeAcknowledged(out); - out.writeBoolean(shardsAcked); + out.writeBoolean(shardsAcknowledged); if (out.getVersion().onOrAfter(Version.V_5_6_0)) { out.writeString(index); } @@ -87,11 +87,23 @@ public void writeTo(StreamOutput out) throws IOException { /** * Returns true if the requisite number of shards were started before - * returning from 
the index creation operation. If {@link #isAcknowledged()} + * returning from the index creation operation. If {@link #isAcknowledged()} * is false, then this also returns false. + * + * @deprecated use {@link #isShardsAcknowledged()} */ + @Deprecated public boolean isShardsAcked() { - return shardsAcked; + return shardsAcknowledged; + } + + /** + * Returns true if the requisite number of shards were started before + * returning from the index creation operation. If {@link #isAcknowledged()} + * is false, then this also returns false. + */ + public boolean isShardsAcknowledged() { + return shardsAcknowledged; } public String index() { @@ -99,7 +111,7 @@ public String index() { } public void addCustomFields(XContentBuilder builder) throws IOException { - builder.field(SHARDS_ACKNOWLEDGED.getPreferredName(), isShardsAcked()); + builder.field(SHARDS_ACKNOWLEDGED.getPreferredName(), isShardsAcknowledged()); builder.field(INDEX.getPreferredName(), index()); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java index 0ac8d02f97760..372c2eb861237 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -79,7 +79,7 @@ protected void masterOperation(final CreateIndexRequest request, final ClusterSt .waitForActiveShards(request.waitForActiveShards()); createIndexService.createIndex(updateRequest, ActionListener.wrap(response -> - listener.onResponse(new CreateIndexResponse(response.isAcknowledged(), response.isShardsAcked(), indexName)), + listener.onResponse(new CreateIndexResponse(response.isAcknowledged(), response.isShardsAcknowledged(), indexName)), listener::onFailure)); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java index c25fc7eb537d3..34d56239b5ce8 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java @@ -202,7 +202,7 @@ CreateIndexRequest getCreateIndexRequest() { * non-negative integer, up to the number of copies per shard (number of replicas + 1), * to wait for the desired amount of shard copies to become active before returning. * Index creation will only wait up until the timeout value for the number of shard copies - * to be active before returning. Check {@link RolloverResponse#isShardsAcked()} to + * to be active before returning. Check {@link RolloverResponse#isShardsAcknowledged()} to * determine if the requisite shard copies were all started before returning or timing out. 
* * @param waitForActiveShards number of active shard copies to wait on diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java index 55df220ec0700..818def9d19a09 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java @@ -87,7 +87,7 @@ public RolloverRequestBuilder mapping(String type, String source) { * non-negative integer, up to the number of copies per shard (number of replicas + 1), * to wait for the desired amount of shard copies to become active before returning. * Index creation will only wait up until the timeout value for the number of shard copies - * to be active before returning. Check {@link RolloverResponse#isShardsAcked()} to + * to be active before returning. Check {@link RolloverResponse#isShardsAcknowledged()} to * determine if the requisite shard copies were all started before returning or timing out. * * @param waitForActiveShards number of active shard copies to wait on diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java index 8c1be3501a820..2dcf4f510470f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java @@ -48,19 +48,19 @@ public final class RolloverResponse extends ActionResponse implements ToXContent private boolean dryRun; private boolean rolledOver; private boolean acknowledged; - private boolean shardsAcked; + private boolean shardsAcknowledged; RolloverResponse() { } RolloverResponse(String oldIndex, String newIndex, Set conditionResults, - boolean dryRun, boolean rolledOver, boolean acknowledged, boolean shardsAcked) { + boolean dryRun, boolean rolledOver, boolean acknowledged, boolean shardsAcknowledged) { this.oldIndex = oldIndex; this.newIndex = newIndex; this.dryRun = dryRun; this.rolledOver = rolledOver; this.acknowledged = acknowledged; - this.shardsAcked = shardsAcked; + this.shardsAcknowledged = shardsAcknowledged; this.conditionStatus = conditionResults.stream() .map(result -> new AbstractMap.SimpleEntry<>(result.condition.toString(), result.matched)) .collect(Collectors.toSet()); @@ -105,7 +105,7 @@ public boolean isRolledOver() { * Returns true if the creation of the new rollover index and switching of the * alias to the newly created index was successful, and returns false otherwise. * If {@link #isDryRun()} is true, then this will also return false. If this - * returns false, then {@link #isShardsAcked()} will also return false. + * returns false, then {@link #isShardsAcknowledged()} will also return false. */ public boolean isAcknowledged() { return acknowledged; @@ -113,11 +113,23 @@ public boolean isAcknowledged() { /** * Returns true if the requisite number of shards were started in the newly - * created rollover index before returning. If {@link #isAcknowledged()} is + * created rollover index before returning. If {@link #isAcknowledged()} is * false, then this will also return false. 
+ * + * @deprecated use {@link #isShardsAcknowledged()} */ + @Deprecated public boolean isShardsAcked() { - return shardsAcked; + return shardsAcknowledged; + } + + /** + * Returns true if the requisite number of shards were started in the newly + * created rollover index before returning. If {@link #isAcknowledged()} is + * false, then this will also return false. + */ + public boolean isShardsAcknowledged() { + return shardsAcknowledged; } @Override @@ -136,7 +148,7 @@ public void readFrom(StreamInput in) throws IOException { dryRun = in.readBoolean(); rolledOver = in.readBoolean(); acknowledged = in.readBoolean(); - shardsAcked = in.readBoolean(); + shardsAcknowledged = in.readBoolean(); } @Override @@ -152,7 +164,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(dryRun); out.writeBoolean(rolledOver); out.writeBoolean(acknowledged); - out.writeBoolean(shardsAcked); + out.writeBoolean(shardsAcknowledged); } @Override @@ -163,7 +175,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(ROLLED_OVER, rolledOver); builder.field(DRY_RUN, dryRun); builder.field(ACKNOWLEDGED, acknowledged); - builder.field(SHARDS_ACKED, shardsAcked); + builder.field(SHARDS_ACKED, shardsAcknowledged); builder.startObject(CONDITIONS); for (Map.Entry entry : conditionStatus) { builder.field(entry.getKey(), entry.getValue()); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index c66f534bd8130..48d03f3ac94c6 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -140,8 +140,9 @@ public void onResponse(IndicesStatsResponse statsResponse) { activeShardsObserver.waitForActiveShards(new String[]{rolloverIndexName}, rolloverRequest.getCreateIndexRequest().waitForActiveShards(), rolloverRequest.masterNodeTimeout(), - isShardsAcked -> listener.onResponse(new RolloverResponse(sourceIndexName, rolloverIndexName, - conditionResults, false, true, true, isShardsAcked)), + isShardsAcknowledged -> listener.onResponse(new RolloverResponse( + sourceIndexName, rolloverIndexName, conditionResults, false, true, true, + isShardsAcknowledged)), listener::onFailure); } else { listener.onResponse(new RolloverResponse(sourceIndexName, rolloverIndexName, conditionResults, diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java index f2f648f70ffa9..016ada92794f6 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java @@ -142,7 +142,7 @@ public String getSourceIndex() { * non-negative integer, up to the number of copies per shard (number of replicas + 1), * to wait for the desired amount of shard copies to become active before returning. * Index creation will only wait up until the timeout value for the number of shard copies - * to be active before returning. Check {@link ResizeResponse#isShardsAcked()} to + * to be active before returning. Check {@link ResizeResponse#isShardsAcknowledged()} to * determine if the requisite shard copies were all started before returning or timing out. 
* * @param waitForActiveShards number of active shard copies to wait on diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestBuilder.java index 6d8d98c0d75f0..4443dfd9e6c5f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestBuilder.java @@ -56,7 +56,7 @@ public ResizeRequestBuilder setSettings(Settings settings) { * non-negative integer, up to the number of copies per shard (number of replicas + 1), * to wait for the desired amount of shard copies to become active before returning. * Index creation will only wait up until the timeout value for the number of shard copies - * to be active before returning. Check {@link ResizeResponse#isShardsAcked()} to + * to be active before returning. Check {@link ResizeResponse#isShardsAcknowledged()} to * determine if the requisite shard copies were all started before returning or timing out. * * @param waitForActiveShards number of active shard copies to wait on diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponse.java index cea74ced69cfc..efbb87e291b4d 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponse.java @@ -25,7 +25,7 @@ public final class ResizeResponse extends CreateIndexResponse { ResizeResponse() { } - ResizeResponse(boolean acknowledged, boolean shardsAcked, String index) { - super(acknowledged, shardsAcked, index); + ResizeResponse(boolean acknowledged, boolean shardsAcknowledged, String index) { + super(acknowledged, shardsAcknowledged, index); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java index c5a15be22a847..688d33a0be734 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java @@ -109,8 +109,8 @@ public void onResponse(IndicesStatsResponse indicesStatsResponse) { createIndexService.createIndex( updateRequest, ActionListener.wrap(response -> - listener.onResponse(new ResizeResponse(response.isAcknowledged(), response.isShardsAcked(), - updateRequest.index())), listener::onFailure + listener.onResponse(new ResizeResponse(response.isAcknowledged(), response.isShardsAcknowledged(), + updateRequest.index())), listener::onFailure ) ); } diff --git a/core/src/main/java/org/elasticsearch/cluster/ack/CreateIndexClusterStateUpdateResponse.java b/core/src/main/java/org/elasticsearch/cluster/ack/CreateIndexClusterStateUpdateResponse.java index 4f0e99ae558fe..2e9089af79ac9 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ack/CreateIndexClusterStateUpdateResponse.java +++ b/core/src/main/java/org/elasticsearch/cluster/ack/CreateIndexClusterStateUpdateResponse.java @@ -24,17 +24,24 @@ */ public class CreateIndexClusterStateUpdateResponse extends ClusterStateUpdateResponse { - private final boolean shardsAcked; + private final boolean shardsAcknowledged; - public CreateIndexClusterStateUpdateResponse(boolean acknowledged, boolean 
shardsAcked) { + public CreateIndexClusterStateUpdateResponse(boolean acknowledged, boolean shardsAcknowledged) { super(acknowledged); - this.shardsAcked = shardsAcked; + this.shardsAcknowledged = shardsAcknowledged; } /** * Returns whether the requisite number of shard copies started before the completion of the operation. + * + * @deprecated use {@link #isShardsAcknowledged()} */ + @Deprecated public boolean isShardsAcked() { - return shardsAcked; + return shardsAcknowledged; + } + + public boolean isShardsAcknowledged() { + return shardsAcknowledged; } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 01783060c0b8a..4ef451e19478b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -191,9 +191,9 @@ public static void validateIndexOrAliasName(String index, BiFunction { if (response.isAcknowledged()) { activeShardsObserver.waitForActiveShards(new String[]{request.index()}, request.waitForActiveShards(), request.ackTimeout(), - shardsAcked -> { - if (shardsAcked == false) { + shardsAcknowledged -> { + if (shardsAcknowledged == false) { logger.debug("[{}] index created, but the operation timed out while waiting for " + "enough shards to be started.", request.index()); } - listener.onResponse(new CreateIndexClusterStateUpdateResponse(response.isAcknowledged(), shardsAcked)); + listener.onResponse(new CreateIndexClusterStateUpdateResponse(response.isAcknowledged(), shardsAcknowledged)); }, listener::onFailure); } else { listener.onResponse(new CreateIndexClusterStateUpdateResponse(false, false)); diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index 3f99e43739376..14d6647071453 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ -308,14 +308,16 @@ public void testDefaultWaitForActiveShardsUsesIndexSetting() throws Exception { .put(settings) .put(SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey(), "all") .build(); - assertFalse(client().admin().indices().prepareCreate("test-idx-2").setSettings(settings).setTimeout("100ms").get().isShardsAcked()); + assertFalse(client().admin().indices().prepareCreate("test-idx-2").setSettings(settings).setTimeout("100ms").get() + .isShardsAcknowledged()); // the numeric equivalent of all should also fail settings = Settings.builder() .put(settings) .put(SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey(), Integer.toString(numReplicas + 1)) .build(); - assertFalse(client().admin().indices().prepareCreate("test-idx-3").setSettings(settings).setTimeout("100ms").get().isShardsAcked()); + assertFalse(client().admin().indices().prepareCreate("test-idx-3").setSettings(settings).setTimeout("100ms").get() + .isShardsAcknowledged()); } public void testInvalidPartitionSize() { diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponseTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponseTests.java index b0fdae9ca62b9..6f6518462213f 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponseTests.java +++ 
b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponseTests.java @@ -44,7 +44,7 @@ public void testSerialization() throws IOException { try (StreamInput in = output.bytes().streamInput()) { CreateIndexResponse serialized = new CreateIndexResponse(); serialized.readFrom(in); - assertEquals(response.isShardsAcked(), serialized.isShardsAcked()); + assertEquals(response.isShardsAcknowledged(), serialized.isShardsAcknowledged()); assertEquals(response.isAcknowledged(), serialized.isAcknowledged()); assertEquals(response.index(), serialized.index()); } @@ -63,7 +63,7 @@ public void testSerializationWithOldVersion() throws IOException { in.setVersion(oldVersion); CreateIndexResponse serialized = new CreateIndexResponse(); serialized.readFrom(in); - assertEquals(response.isShardsAcked(), serialized.isShardsAcked()); + assertEquals(response.isShardsAcknowledged(), serialized.isShardsAcknowledged()); assertEquals(response.isAcknowledged(), serialized.isAcknowledged()); assertNull(serialized.index()); } @@ -110,7 +110,7 @@ private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws } assertEquals(createIndexResponse.index(), parsedCreateIndexResponse.index()); - assertEquals(createIndexResponse.isShardsAcked(), parsedCreateIndexResponse.isShardsAcked()); + assertEquals(createIndexResponse.isShardsAcknowledged(), parsedCreateIndexResponse.isShardsAcknowledged()); assertEquals(createIndexResponse.isAcknowledged(), parsedCreateIndexResponse.isAcknowledged()); } @@ -119,9 +119,9 @@ private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws */ private static CreateIndexResponse createTestItem() throws IOException { boolean acknowledged = randomBoolean(); - boolean shardsAcked = acknowledged && randomBoolean(); + boolean shardsAcknowledged = acknowledged && randomBoolean(); String index = randomAlphaOfLength(5); - return new CreateIndexResponse(acknowledged, shardsAcked, index); + return new CreateIndexResponse(acknowledged, shardsAcknowledged, index); } } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponseTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponseTests.java index 09ceb7960347b..df49de0c1eeb0 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponseTests.java @@ -57,7 +57,7 @@ public void testFromToXContent() throws IOException { private static OpenIndexResponse createTestItem() { boolean acknowledged = randomBoolean(); - boolean shardsAcked = acknowledged && randomBoolean(); - return new OpenIndexResponse(acknowledged, shardsAcked); + boolean shardsAcknowledged = acknowledged && randomBoolean(); + return new OpenIndexResponse(acknowledged, shardsAcknowledged); } } diff --git a/core/src/test/java/org/elasticsearch/action/support/ActiveShardsObserverIT.java b/core/src/test/java/org/elasticsearch/action/support/ActiveShardsObserverIT.java index f3611663b426a..2f0dd64b7ec02 100644 --- a/core/src/test/java/org/elasticsearch/action/support/ActiveShardsObserverIT.java +++ b/core/src/test/java/org/elasticsearch/action/support/ActiveShardsObserverIT.java @@ -51,7 +51,7 @@ public void testCreateIndexNoActiveShardsTimesOut() throws Exception { .setWaitForActiveShards(randomBoolean() ? 
ActiveShardCount.from(1) : ActiveShardCount.ALL) .setTimeout("100ms") .get() - .isShardsAcked()); + .isShardsAcknowledged()); waitForIndexCreationToComplete(indexName); } @@ -86,7 +86,7 @@ public void testCreateIndexNotEnoughActiveShardsTimesOut() throws Exception { .setWaitForActiveShards(randomIntBetween(numDataNodes + 1, numReplicas + 1)) .setTimeout("100ms") .get() - .isShardsAcked()); + .isShardsAcknowledged()); waitForIndexCreationToComplete(indexName); } @@ -116,7 +116,7 @@ public void testCreateIndexWaitsForAllActiveShards() throws Exception { .setWaitForActiveShards(ActiveShardCount.ALL) .setTimeout("100ms") .get() - .isShardsAcked()); + .isShardsAcknowledged()); waitForIndexCreationToComplete(indexName); if (client().admin().indices().prepareExists(indexName).get().isExists()) { client().admin().indices().prepareDelete(indexName).get(); diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java index ed38ec8b05b96..4ab4cab52cf10 100644 --- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java +++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java @@ -128,7 +128,7 @@ public void testBreakerWithRandomExceptions() throws IOException, InterruptedExc .setSettings(settings) .addMapping("type", mapping, XContentType.JSON).execute().actionGet(); final int numDocs; - if (response.isShardsAcked() == false) { + if (response.isShardsAcknowledged() == false) { /* some seeds just won't let you create the index at all and we enter a ping-pong mode * trying one node after another etc. that is ok but we need to make sure we don't wait * forever when indexing documents so we set numDocs = 1 and expect all shards to fail diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java index 708a95e4a49f2..4eaaa96df7649 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java +++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java @@ -143,7 +143,7 @@ public static void assertAcked(CreateIndexResponse response) { assertThat(response.getClass().getSimpleName() + " failed - not acked", response.isAcknowledged(), equalTo(true)); assertVersionSerializable(response); assertTrue(response.getClass().getSimpleName() + " failed - index creation acked but not all shards were started", - response.isShardsAcked()); + response.isShardsAcknowledged()); } /** From 04ce0e762562056b60e5a987ecbc397405b3e561 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Mon, 8 Jan 2018 15:03:40 +0100 Subject: [PATCH 08/16] Avoid concurrent snapshot finalizations when deleting an INIT snapshot (#28078) This commit removes the finalization of a snapshot by the snapshot deletion request. This way, the deletion marks the snapshot as ABORTED in cluster state and waits for the snapshot completion. It is the responsability of the snapshot execution to detect the abortion and terminates itself correctly. 
This avoids concurrent snapshot finalizations and also ordinates the operations: the deletion aborts the snapshot and waits for the snapshot completion, the creation detects the abortion and stops by itself and finalizes the snapshot, then the deletion resumes and continues the deletion process. --- .../snapshots/SnapshotsService.java | 73 ++++++++++++------- .../SharedClusterSnapshotRestoreIT.java | 6 +- 2 files changed, 48 insertions(+), 31 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index e19394714731f..ec63b7d228ec5 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -372,8 +372,8 @@ private void beginSnapshot(final ClusterState clusterState, return; } clusterService.submitStateUpdateTask("update_snapshot [" + snapshot.snapshot() + "]", new ClusterStateUpdateTask() { - boolean accepted = false; - SnapshotsInProgress.Entry updatedSnapshot; + + SnapshotsInProgress.Entry endSnapshot; String failure = null; @Override @@ -381,17 +381,23 @@ public ClusterState execute(ClusterState currentState) { SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); List entries = new ArrayList<>(); for (SnapshotsInProgress.Entry entry : snapshots.entries()) { - if (entry.snapshot().equals(snapshot.snapshot()) && entry.state() != State.ABORTED) { - // Replace the snapshot that was just created + if (entry.snapshot().equals(snapshot.snapshot()) == false) { + entries.add(entry); + continue; + } + + if (entry.state() != State.ABORTED) { + // Replace the snapshot that was just intialized ImmutableOpenMap shards = shards(currentState, entry.indices()); if (!partial) { Tuple, Set> indicesWithMissingShards = indicesWithMissingShards(shards, currentState.metaData()); Set missing = indicesWithMissingShards.v1(); Set closed = indicesWithMissingShards.v2(); if (missing.isEmpty() == false || closed.isEmpty() == false) { - StringBuilder failureMessage = new StringBuilder(); - updatedSnapshot = new SnapshotsInProgress.Entry(entry, State.FAILED, shards); - entries.add(updatedSnapshot); + endSnapshot = new SnapshotsInProgress.Entry(entry, State.FAILED, shards); + entries.add(endSnapshot); + + final StringBuilder failureMessage = new StringBuilder(); if (missing.isEmpty() == false) { failureMessage.append("Indices don't have primary shards "); failureMessage.append(missing); @@ -407,13 +413,16 @@ public ClusterState execute(ClusterState currentState) { continue; } } - updatedSnapshot = new SnapshotsInProgress.Entry(entry, State.STARTED, shards); + SnapshotsInProgress.Entry updatedSnapshot = new SnapshotsInProgress.Entry(entry, State.STARTED, shards); entries.add(updatedSnapshot); - if (!completed(shards.values())) { - accepted = true; + if (completed(shards.values())) { + endSnapshot = updatedSnapshot; } } else { - entries.add(entry); + assert entry.state() == State.ABORTED : "expecting snapshot to be aborted during initialization"; + failure = "snapshot was aborted during initialization"; + endSnapshot = entry; + entries.add(endSnapshot); } } return ClusterState.builder(currentState) @@ -448,8 +457,8 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS // We should end snapshot only if 1) we didn't accept it for processing (which happens when there // is nothing to do) and 2) there was a snapshot in metadata that we should end. 
Otherwise we should // go ahead and continue working on this snapshot rather then end here. - if (!accepted && updatedSnapshot != null) { - endSnapshot(updatedSnapshot, failure); + if (endSnapshot != null) { + endSnapshot(endSnapshot, failure); } } }); @@ -750,6 +759,11 @@ public ClusterState execute(ClusterState currentState) throws Exception { } entries.add(updatedSnapshot); } else if (snapshot.state() == State.INIT && newMaster) { + changed = true; + // Mark the snapshot as aborted as it failed to start from the previous master + updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, State.ABORTED, snapshot.shards()); + entries.add(updatedSnapshot); + // Clean up the snapshot that failed to start from the old master deleteSnapshot(snapshot.snapshot(), new DeleteSnapshotListener() { @Override @@ -935,7 +949,7 @@ private Tuple, Set> indicesWithMissingShards(ImmutableOpenMa * * @param entry snapshot */ - void endSnapshot(SnapshotsInProgress.Entry entry) { + void endSnapshot(final SnapshotsInProgress.Entry entry) { endSnapshot(entry, null); } @@ -1144,24 +1158,26 @@ public ClusterState execute(ClusterState currentState) throws Exception { } else { // This snapshot is currently running - stopping shards first waitForSnapshot = true; - ImmutableOpenMap shards; - if (snapshotEntry.state() == State.STARTED && snapshotEntry.shards() != null) { - // snapshot is currently running - stop started shards - ImmutableOpenMap.Builder shardsBuilder = ImmutableOpenMap.builder(); + + final ImmutableOpenMap shards; + + final State state = snapshotEntry.state(); + if (state == State.INIT) { + // snapshot is still initializing, mark it as aborted + shards = snapshotEntry.shards(); + + } else if (state == State.STARTED) { + // snapshot is started - mark every non completed shard as aborted + final ImmutableOpenMap.Builder shardsBuilder = ImmutableOpenMap.builder(); for (ObjectObjectCursor shardEntry : snapshotEntry.shards()) { ShardSnapshotStatus status = shardEntry.value; - if (!status.state().completed()) { - shardsBuilder.put(shardEntry.key, new ShardSnapshotStatus(status.nodeId(), State.ABORTED, - "aborted by snapshot deletion")); - } else { - shardsBuilder.put(shardEntry.key, status); + if (status.state().completed() == false) { + status = new ShardSnapshotStatus(status.nodeId(), State.ABORTED, "aborted by snapshot deletion"); } + shardsBuilder.put(shardEntry.key, status); } shards = shardsBuilder.build(); - } else if (snapshotEntry.state() == State.INIT) { - // snapshot hasn't started yet - end it - shards = snapshotEntry.shards(); - endSnapshot(snapshotEntry); + } else { boolean hasUncompletedShards = false; // Cleanup in case a node gone missing and snapshot wasn't updated for some reason @@ -1178,7 +1194,8 @@ public ClusterState execute(ClusterState currentState) throws Exception { logger.debug("trying to delete completed snapshot - should wait for shards to finalize on all nodes"); return currentState; } else { - // no shards to wait for - finish the snapshot + // no shards to wait for but a node is gone - this is the only case + // where we force to finish the snapshot logger.debug("trying to delete completed snapshot with no finalizing shards - can delete immediately"); shards = snapshotEntry.shards(); endSnapshot(snapshotEntry); diff --git a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 19c842a277afe..1b3a35ff160fb 100644 --- 
a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -3151,7 +3151,7 @@ public void testSnapshottingWithMissingSequenceNumbers() { assertThat(shardStats.getSeqNoStats().getMaxSeqNo(), equalTo(15L)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/27974") + @TestLogging("org.elasticsearch.snapshots:TRACE") public void testAbortedSnapshotDuringInitDoesNotStart() throws Exception { final Client client = client(); @@ -3163,11 +3163,11 @@ public void testAbortedSnapshotDuringInitDoesNotStart() throws Exception { )); createIndex("test-idx"); - final int nbDocs = scaledRandomIntBetween(1, 100); + final int nbDocs = scaledRandomIntBetween(100, 500); for (int i = 0; i < nbDocs; i++) { index("test-idx", "_doc", Integer.toString(i), "foo", "bar" + i); } - refresh(); + flushAndRefresh("test-idx"); assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits(), equalTo((long) nbDocs)); // Create a snapshot From 49636992acf93c3880fb1ea816b556be8f2ab36e Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 8 Jan 2018 09:39:34 -0500 Subject: [PATCH 09/16] Fail rollover if duplicated alias found in template (#28110) If a newly created index from a rollover request matches with an index template whose aliases contains the rollover request alias, the alias will point to multiple indices. This will cause indexing requests to be rejected. To avoid such situation, we make sure that there is no duplicated alias before creating a new index; otherwise abort and report an error to the caller. Closes #26976 --- .../rollover/TransportRolloverAction.java | 18 ++++++++++++++++++ .../metadata/MetaDataCreateIndexService.java | 18 +----------------- .../MetaDataIndexTemplateService.java | 19 +++++++++++++++++++ .../admin/indices/rollover/RolloverIT.java | 11 +++++++++++ .../TransportRolloverActionTests.java | 16 ++++++++++++++++ .../MetaDataIndexTemplateServiceTests.java | 19 +++++++++++++++++-- 6 files changed, 82 insertions(+), 19 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index 48d03f3ac94c6..2ed5192e6cfb2 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -37,9 +37,11 @@ import org.elasticsearch.cluster.metadata.AliasOrIndex; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; import org.elasticsearch.cluster.metadata.MetaDataIndexAliasesService; +import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -115,6 +117,7 @@ protected void masterOperation(final RolloverRequest rolloverRequest, final Clus : generateRolloverIndexName(sourceProvidedName, indexNameExpressionResolver); final String rolloverIndexName = indexNameExpressionResolver.resolveDateMathExpression(unresolvedName); 
MetaDataCreateIndexService.validateIndexName(rolloverIndexName, state); // will fail if the index already exists + checkNoDuplicatedAliasInIndexTemplate(metaData, rolloverIndexName, rolloverRequest.getAlias()); client.admin().indices().prepareStats(sourceIndexName).clear().setDocs(true).execute( new ActionListener() { @Override @@ -238,4 +241,19 @@ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final Stri .mappings(createIndexRequest.mappings()); } + /** + * If the newly created index matches with an index template whose aliases contains the rollover alias, + * the rollover alias will point to multiple indices. This causes indexing requests to be rejected. + * To avoid this, we make sure that there is no duplicated alias in index templates before creating a new index. + */ + static void checkNoDuplicatedAliasInIndexTemplate(MetaData metaData, String rolloverIndexName, String rolloverRequestAlias) { + final List matchedTemplates = MetaDataIndexTemplateService.findTemplates(metaData, rolloverIndexName); + for (IndexTemplateMetaData template : matchedTemplates) { + if (template.aliases().containsKey(rolloverRequestAlias)) { + throw new IllegalArgumentException(String.format(Locale.ROOT, + "Rollover alias [%s] can point to multiple indices, found duplicated alias [%s] in index template [%s]", + rolloverRequestAlias, template.aliases().keys(), template.name())); + } + } + } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 4ef451e19478b..28a7570ca5582 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -277,7 +277,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { // we only find a template when its an API call (a new index) // find templates, highest order are better matching - List templates = findTemplates(request, currentState); + List templates = MetaDataIndexTemplateService.findTemplates(currentState.metaData(), request.index()); Map customs = new HashMap<>(); @@ -564,22 +564,6 @@ public void onFailure(String source, Exception e) { } super.onFailure(source, e); } - - private List findTemplates(CreateIndexClusterStateUpdateRequest request, ClusterState state) throws IOException { - List templateMetadata = new ArrayList<>(); - for (ObjectCursor cursor : state.metaData().templates().values()) { - IndexTemplateMetaData metadata = cursor.value; - for (String template: metadata.patterns()) { - if (Regex.simpleMatch(template, request.index())) { - templateMetadata.add(metadata); - break; - } - } - } - - CollectionUtil.timSort(templateMetadata, Comparator.comparingInt(IndexTemplateMetaData::order).reversed()); - return templateMetadata; - } } private void validate(CreateIndexClusterStateUpdateRequest request, ClusterState state) { diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java index 883d7f2fc47ec..9d8da37cbeeba 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java @@ -20,6 +20,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.lucene.util.CollectionUtil; import 
org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.support.master.MasterNodeRequest; @@ -48,6 +49,7 @@ import java.util.ArrayList; import java.util.Collections; +import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -193,6 +195,23 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS }); } + /** + * Finds index templates whose index pattern matched with the given index name. + * The result is sorted by {@link IndexTemplateMetaData#order} descending. + */ + public static List findTemplates(MetaData metaData, String indexName) { + final List matchedTemplates = new ArrayList<>(); + for (ObjectCursor cursor : metaData.templates().values()) { + final IndexTemplateMetaData template = cursor.value; + final boolean matched = template.patterns().stream().anyMatch(pattern -> Regex.simpleMatch(pattern, indexName)); + if (matched) { + matchedTemplates.add(template); + } + } + CollectionUtil.timSort(matchedTemplates, Comparator.comparingInt(IndexTemplateMetaData::order).reversed()); + return matchedTemplates; + } + private static void validateAndAddTemplate(final PutRequest request, IndexTemplateMetaData.Builder templateBuilder, IndicesService indicesService, NamedXContentRegistry xContentRegistry) throws Exception { Index createdIndex = null; diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java index c047611f71932..e2c31db81ce60 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java @@ -277,4 +277,15 @@ public void testRolloverMaxSize() throws Exception { assertThat("No rollover with an empty index", response.isRolledOver(), equalTo(false)); } } + + public void testRejectIfAliasFoundInTemplate() throws Exception { + client().admin().indices().preparePutTemplate("logs") + .setPatterns(Collections.singletonList("logs-*")).addAlias(new Alias("logs-write")).get(); + assertAcked(client().admin().indices().prepareCreate("logs-000001").get()); + ensureYellow("logs-write"); + final IllegalArgumentException error = expectThrows(IllegalArgumentException.class, + () -> client().admin().indices().prepareRolloverIndex("logs-write").addMaxIndexSizeCondition(new ByteSizeValue(1)).get()); + assertThat(error.getMessage(), equalTo( + "Rollover alias [logs-write] can point to multiple indices, found duplicated alias [[logs-write]] in index template [logs]")); + } } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java index dcb3a87df74f4..3366646e24a79 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.UUIDs; import 
org.elasticsearch.common.settings.Settings; @@ -40,11 +41,13 @@ import org.elasticsearch.test.ESTestCase; import org.mockito.ArgumentCaptor; +import java.util.Arrays; import java.util.List; import java.util.Locale; import java.util.Set; import static org.elasticsearch.action.admin.indices.rollover.TransportRolloverAction.evaluateConditions; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.mockito.Matchers.any; @@ -241,6 +244,19 @@ public void testCreateIndexRequest() throws Exception { assertThat(createIndexRequest.cause(), equalTo("rollover_index")); } + public void testRejectDuplicateAlias() throws Exception { + final IndexTemplateMetaData template = IndexTemplateMetaData.builder("test-template") + .patterns(Arrays.asList("foo-*", "bar-*")) + .putAlias(AliasMetaData.builder("foo-write")).putAlias(AliasMetaData.builder("bar-write")) + .build(); + final MetaData metaData = MetaData.builder().put(createMetaData(), false).put(template).build(); + String indexName = randomFrom("foo-123", "bar-xyz"); + String aliasName = randomFrom("foo-write", "bar-write"); + final IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, + () -> TransportRolloverAction.checkNoDuplicatedAliasInIndexTemplate(metaData, indexName, aliasName)); + assertThat(ex.getMessage(), containsString("index template [test-template]")); + } + private IndicesStatsResponse createIndicesStatResponse(long totalDocs, long primaryDocs) { final CommonStats primaryStats = mock(CommonStats.class); when(primaryStats.getDocs()).thenReturn(new DocsStats(primaryDocs, 0, between(1, 10000))); diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java index 58012909b8f2e..d3c133915e7b8 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java @@ -20,8 +20,10 @@ package org.elasticsearch.action.admin.indices.template.put; import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.AliasValidator; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService; import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService.PutRequest; @@ -38,16 +40,17 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; +import java.util.stream.Collectors; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.empty; public class MetaDataIndexTemplateServiceTests extends ESSingleNodeTestCase { public void testIndexTemplateInvalidNumberOfShards() { @@ -154,6 +157,18 @@ public void testAliasInvalidFilterInvalidJson() 
throws Exception { assertThat(errors.get(0).getMessage(), equalTo("failed to parse filter for alias [invalid_alias]")); } + public void testFindTemplates() throws Exception { + client().admin().indices().prepareDeleteTemplate("*").get(); // Delete all existing templates + putTemplateDetail(new PutRequest("test", "foo-1").patterns(Arrays.asList("foo-*")).order(1)); + putTemplateDetail(new PutRequest("test", "foo-2").patterns(Arrays.asList("foo-*")).order(2)); + putTemplateDetail(new PutRequest("test", "bar").patterns(Arrays.asList("bar-*")).order(between(0, 100))); + final ClusterState state = client().admin().cluster().prepareState().get().getState(); + assertThat(MetaDataIndexTemplateService.findTemplates(state.metaData(), "foo-1234").stream() + .map(IndexTemplateMetaData::name).collect(Collectors.toList()), contains("foo-2", "foo-1")); + assertThat(MetaDataIndexTemplateService.findTemplates(state.metaData(), "bar-xyz").stream() + .map(IndexTemplateMetaData::name).collect(Collectors.toList()), contains("bar")); + assertThat(MetaDataIndexTemplateService.findTemplates(state.metaData(), "baz"), empty()); + } private static List putTemplate(NamedXContentRegistry xContentRegistry, PutRequest request) { MetaDataCreateIndexService createIndexService = new MetaDataCreateIndexService( From a58dc8d82cc3e89dbbd30adcd444371a3d10f614 Mon Sep 17 00:00:00 2001 From: Andrew Banchich Date: Mon, 8 Jan 2018 10:55:48 -0500 Subject: [PATCH 10/16] [Docs] Fix Date Math example descriptions (#28125) --- docs/reference/api-conventions.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index db138912683f3..7cd608d0bb533 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -218,8 +218,8 @@ The supported units are: Assuming `now` is `2001-01-01 12:00:00`, some examples are: `now+1h`:: `now` in milliseconds plus one hour. Resolves to: `2001-01-01 13:00:00` -`now-1h`:: `now` in milliseconds plus one hour. Resolves to: `2001-01-01 11:00:00` -`now-1h/d`:: `now` in milliseconds rounded down to UTC 00:00. Resolves to: `2001-01-01 00:00:00`` +`now-1h`:: `now` in milliseconds minus one hour. Resolves to: `2001-01-01 11:00:00` +`now-1h/d`:: `now` in milliseconds minus one hour, rounded down to UTC 00:00. Resolves to: `2001-01-01 00:00:00`` `2001-01-01\|\|+1M/d`:: `now` in milliseconds plus one month. Resolves to: `2001-02-01 00:00:00` [float] From af3c2fcd65fa16b19c3719c87f701e8de0cfdc2f Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Mon, 8 Jan 2018 10:48:16 -0500 Subject: [PATCH 11/16] Fix expected plugins test for transport-nio We have a packaging test that tries to install all plugins, and then asserts that all expected plugins are installed. The expected plugins are dervied from the list of plugins in the plugins sub-project. The plugin transport-nio was recently added here, but explicit commands to install and remove this plugin were never added. This commit addresses this. 
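For anyone reproducing the packaging check by hand, the added cases follow the pattern of the existing ones in the hunk below: install_and_check_plugin takes the plugin name split at the hyphen (here "transport nio"), while remove_plugin takes the full hyphenated name ("transport-nio"). On a live install this corresponds roughly to the following sketch (the plugin zip path is illustrative and not part of the patch):

    # Hypothetical manual equivalent of the added install/remove cases.
    "$ESHOME/bin/elasticsearch-plugin" install file:///path/to/transport-nio.zip
    "$ESHOME/bin/elasticsearch-plugin" list | grep transport-nio
    "$ESHOME/bin/elasticsearch-plugin" remove transport-nio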
--- .../packaging/tests/module_and_plugin_test_cases.bash | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash b/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash index 767ebf95dd6fe..fb721d5c6d9ad 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash +++ b/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash @@ -271,6 +271,10 @@ fi install_and_check_plugin store smb } +@test "[$GROUP] install transport-nio plugin" { + install_and_check_plugin transport nio +} + @test "[$GROUP] check the installed plugins can be listed with 'plugins list' and result matches the list of plugins in plugins pom" { "$ESHOME/bin/elasticsearch-plugin" list | cut -d'@' -f1 > /tmp/installed compare_plugins_list "/tmp/installed" "'plugins list'" @@ -373,6 +377,10 @@ fi remove_plugin store-smb } +@test "[$GROUP] remove transport-nio plugin" { + remove_plugin transport-nio +} + @test "[$GROUP] start elasticsearch with all plugins removed" { start_elasticsearch_service } From ff3db0b50ecb654e6f393c5200156a8aefb059eb Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Mon, 8 Jan 2018 10:18:19 -0700 Subject: [PATCH 12/16] Cleanup TcpChannelFactory and remove classes (#28102) This commit is related to #27260. It moves the TcpChannelFactory into NioTransport so that consumers do not have to be passed around. Additionally it deletes an unused read handler. --- .../transport/nio/NioTransport.java | 57 ++++++++++----- .../transport/nio/TcpChannelFactory.java | 69 ------------------- .../nio/TcpNioServerSocketChannel.java | 4 +- .../transport/nio/TcpReadHandler.java | 48 ------------- .../transport/nio/NioTransportIT.java | 1 - 5 files changed, 42 insertions(+), 137 deletions(-) delete mode 100644 plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpChannelFactory.java delete mode 100644 plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpReadHandler.java diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java index d44a606e3c916..42063878b4b2f 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java @@ -33,14 +33,17 @@ import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.nio.AcceptingSelector; import org.elasticsearch.nio.AcceptorEventHandler; import org.elasticsearch.nio.BytesReadContext; import org.elasticsearch.nio.BytesWriteContext; +import org.elasticsearch.nio.ChannelFactory; import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.NioGroup; import org.elasticsearch.nio.NioSocketChannel; import org.elasticsearch.nio.ReadContext; import org.elasticsearch.nio.SocketEventHandler; +import org.elasticsearch.nio.SocketSelector; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TcpChannel; import org.elasticsearch.transport.TcpTransport; @@ -49,8 +52,9 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.nio.ByteBuffer; +import java.nio.channels.ServerSocketChannel; +import java.nio.channels.SocketChannel; import 
java.util.concurrent.ConcurrentMap; -import java.util.function.Consumer; import java.util.function.Supplier; import static org.elasticsearch.common.settings.Setting.intSetting; @@ -110,13 +114,13 @@ protected void doStart() { NioTransport.NIO_WORKER_COUNT.get(settings), SocketEventHandler::new); ProfileSettings clientProfileSettings = new ProfileSettings(settings, "default"); - clientChannelFactory = new TcpChannelFactory(clientProfileSettings, getContextSetter(), getServerContextSetter()); + clientChannelFactory = new TcpChannelFactory(clientProfileSettings); if (useNetworkServer) { // loop through all profiles and start them up, special handling for default one for (ProfileSettings profileSettings : profileSettings) { String profileName = profileSettings.profileName; - TcpChannelFactory factory = new TcpChannelFactory(profileSettings, getContextSetter(), getServerContextSetter()); + TcpChannelFactory factory = new TcpChannelFactory(profileSettings); profileToChannelFactory.putIfAbsent(profileName, factory); bindServer(profileSettings); } @@ -143,29 +147,46 @@ protected void stopInternal() { profileToChannelFactory.clear(); } - final void exceptionCaught(NioSocketChannel channel, Exception exception) { + private void exceptionCaught(NioSocketChannel channel, Exception exception) { onException((TcpChannel) channel, exception); } - private Consumer getContextSetter() { - return (c) -> { + private void acceptChannel(NioSocketChannel channel) { + serverAcceptedChannel((TcpNioSocketChannel) channel); + } + + private class TcpChannelFactory extends ChannelFactory { + + private final String profileName; + + TcpChannelFactory(TcpTransport.ProfileSettings profileSettings) { + super(new RawChannelFactory(profileSettings.tcpNoDelay, + profileSettings.tcpKeepAlive, + profileSettings.reuseAddress, + Math.toIntExact(profileSettings.sendBufferSize.getBytes()), + Math.toIntExact(profileSettings.receiveBufferSize.getBytes()))); + this.profileName = profileSettings.profileName; + } + + @Override + public TcpNioSocketChannel createChannel(SocketSelector selector, SocketChannel channel) throws IOException { + TcpNioSocketChannel nioChannel = new TcpNioSocketChannel(profileName, channel, selector); Supplier pageSupplier = () -> { Recycler.V bytes = pageCacheRecycler.bytePage(false); return new InboundChannelBuffer.Page(ByteBuffer.wrap(bytes.v()), bytes::close); }; ReadContext.ReadConsumer nioReadConsumer = channelBuffer -> - consumeNetworkReads(c, BytesReference.fromByteBuffers(channelBuffer.sliceBuffersTo(channelBuffer.getIndex()))); - BytesReadContext readContext = new BytesReadContext(c, nioReadConsumer, new InboundChannelBuffer(pageSupplier)); - c.setContexts(readContext, new BytesWriteContext(c), this::exceptionCaught); - }; - } - - private void acceptChannel(NioSocketChannel channel) { - serverAcceptedChannel((TcpNioSocketChannel) channel); - - } + consumeNetworkReads(nioChannel, BytesReference.fromByteBuffers(channelBuffer.sliceBuffersTo(channelBuffer.getIndex()))); + BytesReadContext readContext = new BytesReadContext(nioChannel, nioReadConsumer, new InboundChannelBuffer(pageSupplier)); + nioChannel.setContexts(readContext, new BytesWriteContext(nioChannel), NioTransport.this::exceptionCaught); + return nioChannel; + } - private Consumer getServerContextSetter() { - return (c) -> c.setAcceptContext(this::acceptChannel); + @Override + public TcpNioServerSocketChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException { + TcpNioServerSocketChannel 
nioServerChannel = new TcpNioServerSocketChannel(profileName, channel, this, selector); + nioServerChannel.setAcceptContext(NioTransport.this::acceptChannel); + return nioServerChannel; + } } } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpChannelFactory.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpChannelFactory.java deleted file mode 100644 index 44c72efb857e9..0000000000000 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpChannelFactory.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.transport.nio; - -import org.elasticsearch.nio.AcceptingSelector; -import org.elasticsearch.nio.ChannelFactory; -import org.elasticsearch.nio.SocketSelector; -import org.elasticsearch.transport.TcpTransport; - -import java.io.IOException; -import java.nio.channels.ServerSocketChannel; -import java.nio.channels.SocketChannel; -import java.util.function.Consumer; - -/** - * This is an implementation of {@link ChannelFactory} which returns channels that adhere to the - * {@link org.elasticsearch.transport.TcpChannel} interface. The channels will use the provided - * {@link TcpTransport.ProfileSettings}. The provided context setters will be called with the channel after - * construction. 
- */ -public class TcpChannelFactory extends ChannelFactory { - - private final Consumer contextSetter; - private final Consumer serverContextSetter; - private final String profileName; - - TcpChannelFactory(TcpTransport.ProfileSettings profileSettings, Consumer contextSetter, - Consumer serverContextSetter) { - super(new RawChannelFactory(profileSettings.tcpNoDelay, - profileSettings.tcpKeepAlive, - profileSettings.reuseAddress, - Math.toIntExact(profileSettings.sendBufferSize.getBytes()), - Math.toIntExact(profileSettings.receiveBufferSize.getBytes()))); - this.profileName = profileSettings.profileName; - this.contextSetter = contextSetter; - this.serverContextSetter = serverContextSetter; - } - - @Override - public TcpNioSocketChannel createChannel(SocketSelector selector, SocketChannel channel) throws IOException { - TcpNioSocketChannel nioChannel = new TcpNioSocketChannel(profileName, channel, selector); - contextSetter.accept(nioChannel); - return nioChannel; - } - - @Override - public TcpNioServerSocketChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException { - TcpNioServerSocketChannel nioServerChannel = new TcpNioServerSocketChannel(profileName, channel, this, selector); - serverContextSetter.accept(nioServerChannel); - return nioServerChannel; - } -} diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java index 4aa9795dda496..7f657c763486d 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.nio.AcceptingSelector; +import org.elasticsearch.nio.ChannelFactory; import org.elasticsearch.nio.NioServerSocketChannel; import org.elasticsearch.transport.TcpChannel; @@ -37,7 +38,8 @@ public class TcpNioServerSocketChannel extends NioServerSocketChannel implements private final String profile; - TcpNioServerSocketChannel(String profile, ServerSocketChannel socketChannel, TcpChannelFactory channelFactory, + TcpNioServerSocketChannel(String profile, ServerSocketChannel socketChannel, + ChannelFactory channelFactory, AcceptingSelector selector) throws IOException { super(socketChannel, channelFactory, selector); this.profile = profile; diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpReadHandler.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpReadHandler.java deleted file mode 100644 index 37478286831ad..0000000000000 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpReadHandler.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.transport.nio; - -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.nio.NioSocketChannel; - -import java.io.IOException; - -public class TcpReadHandler { - - private final String profile; - private final NioTransport transport; - - TcpReadHandler(String profile, NioTransport transport) { - this.profile = profile; - this.transport = transport; - } - - public void handleMessage(BytesReference reference, TcpNioSocketChannel channel, int messageBytesLength) { - try { - transport.messageReceived(reference, channel); - } catch (IOException e) { - handleException(channel, e); - } - } - - public void handleException(NioSocketChannel channel, Exception e) { - transport.exceptionCaught(channel, e); - } -} diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportIT.java b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportIT.java index 0712717eb6405..df53a4d79c7ad 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportIT.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportIT.java @@ -128,5 +128,4 @@ protected void validateRequest(StreamInput buffer, long requestId, String action } } - } From c46222ea158c1397ab7abe0b3b9833209255f7c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maxime=20Gr=C3=A9au?= Date: Mon, 8 Jan 2018 19:36:33 +0100 Subject: [PATCH 13/16] Fix Licenses values for CDDL and Custom URL (#27999) * Fix license SPDX identifiers (CDDL) * Fix license type for Custom URL: * If the license is identified but not listed as an SPDX identifier, the character `;` is used after the identifier to set the license URL. --- .../org/elasticsearch/gradle/DependenciesInfoTask.groovy | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/DependenciesInfoTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/DependenciesInfoTask.groovy index ddd5248396ce3..eb82b4675f287 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/DependenciesInfoTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/DependenciesInfoTask.groovy @@ -88,7 +88,7 @@ public class DependenciesInfoTask extends DefaultTask { * *
 *     <li>UNKNOWN if LICENSE file is not present for this dependency.</li>
 *     <li>one SPDX identifier if the LICENSE content matches with an SPDX license.</li>
-*     <li>Custom:URL if it's not an SPDX license,
+*     <li>Custom;URL if it's not an SPDX license,
 *          URL is the Github URL to the LICENSE file in elasticsearch repository.</li>
  • * * @@ -116,7 +116,7 @@ public class DependenciesInfoTask extends DefaultTask { // As we have the license file, we create a Custom entry with the URL to this license file. final gitBranch = System.getProperty('build.branch', 'master') final String githubBaseURL = "https://raw.githubusercontent.com/elastic/elasticsearch/${gitBranch}/" - return "Custom:${license.getCanonicalPath().replaceFirst('.*/elasticsearch/', githubBaseURL)}" + return "Custom;${license.getCanonicalPath().replaceFirst('.*/elasticsearch/', githubBaseURL)}" } return spdx } else { @@ -156,10 +156,10 @@ public class DependenciesInfoTask extends DefaultTask { spdx = 'LGPL-3.0' break case ~/.*${CDDL_1_0}.*/: - spdx = 'CDDL_1_0' + spdx = 'CDDL-1.0' break case ~/.*${CDDL_1_1}.*/: - spdx = 'CDDL_1_1' + spdx = 'CDDL-1.1' break case ~/.*${ICU}.*/: spdx = 'ICU' From 06d1ed8ba889829e281f3194f2cc288bb482ad38 Mon Sep 17 00:00:00 2001 From: Nathan Gass Date: Tue, 9 Jan 2018 01:41:35 +0100 Subject: [PATCH 14/16] Fix upgrading indices which use a custom similarity plugin. (#26985) Use a fake similarity map that always returns a value in MetaDataIndexUpgradeService.checkMappingsCompatibility instead of an empty map. Closes #25350 --- .../metadata/MetaDataIndexUpgradeService.java | 39 ++++++++++++++++--- 1 file changed, 33 insertions(+), 6 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java index d58ed04a930f9..e17b9fbb4d56a 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java @@ -32,6 +32,7 @@ import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.similarity.SimilarityService; +import org.elasticsearch.index.similarity.SimilarityProvider; import org.elasticsearch.indices.mapper.MapperRegistry; import java.util.AbstractMap; @@ -132,19 +133,43 @@ private static boolean isSupportedVersion(IndexMetaData indexMetaData, Version m */ private void checkMappingsCompatibility(IndexMetaData indexMetaData) { try { - // We cannot instantiate real analysis server at this point because the node might not have - // been started yet. However, we don't really need real analyzers at this stage - so we can fake it + + // We cannot instantiate real analysis server or similiarity service at this point because the node + // might not have been started yet. However, we don't really need real analyzers or similarities at + // this stage - so we can fake it using constant maps accepting every key. + // This is ok because all used similarities and analyzers for this index were known before the upgrade. + // Missing analyzers and similarities plugin will still trigger the apropriate error during the + // actual upgrade. 
+ IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings); - SimilarityService similarityService = new SimilarityService(indexSettings, null, Collections.emptyMap()); + + final Map similarityMap = new AbstractMap() { + @Override + public boolean containsKey(Object key) { + return true; + } + + @Override + public SimilarityProvider.Factory get(Object key) { + assert key instanceof String : "key must be a string but was: " + key.getClass(); + return SimilarityService.BUILT_IN.get(SimilarityService.DEFAULT_SIMILARITY); + } + + // this entrySet impl isn't fully correct but necessary as SimilarityService will iterate + // over all similarities + @Override + public Set> entrySet() { + return Collections.emptySet(); + } + }; + SimilarityService similarityService = new SimilarityService(indexSettings, null, similarityMap); final NamedAnalyzer fakeDefault = new NamedAnalyzer("default", AnalyzerScope.INDEX, new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName) { throw new UnsupportedOperationException("shouldn't be here"); } }); - // this is just a fake map that always returns the same value for any possible string key - // also the entrySet impl isn't fully correct but we implement it since internally - // IndexAnalyzers will iterate over all analyzers to close them. + final Map analyzerMap = new AbstractMap() { @Override public NamedAnalyzer get(Object key) { @@ -152,6 +177,8 @@ public NamedAnalyzer get(Object key) { return new NamedAnalyzer((String)key, AnalyzerScope.INDEX, fakeDefault.analyzer()); } + // this entrySet impl isn't fully correct but necessary as IndexAnalyzers will iterate + // over all analyzers to close them @Override public Set> entrySet() { return Collections.emptySet(); From 1d1dcd4ae7de849efaa92b8fb6495bdc68d020ba Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Mon, 8 Jan 2018 17:54:45 -0800 Subject: [PATCH 15/16] Painless: Add a simple cache for whitelist methods and fields. (#28142) With support for multiple contexts we are adding some caching to the whitelist to keep the memory footprint for definitions from exploding. 
--- .../elasticsearch/painless/Definition.java | 64 ++++++++++++++----- 1 file changed, 49 insertions(+), 15 deletions(-) diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java index 34385dbf7acfc..7d8b4ff4e614e 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java @@ -41,6 +41,9 @@ */ public final class Definition { + private static final Map methodCache = new HashMap<>(); + private static final Map fieldCache = new HashMap<>(); + private static final Pattern TYPE_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][._a-zA-Z0-9]*$"); public static final String[] DEFINITION_FILES = new String[] { @@ -533,6 +536,22 @@ Collection allSimpleTypes() { return simpleTypesMap.values(); } + private static String buildMethodCacheKey(String structName, String methodName, List arguments) { + StringBuilder key = new StringBuilder(); + key.append(structName); + key.append(methodName); + + for (Type argument : arguments) { + key.append(argument.name); + } + + return key.toString(); + } + + private static String buildFieldCacheKey(String structName, String fieldName, String typeName) { + return structName + fieldName + typeName; + } + // INTERNAL IMPLEMENTATION: private final Map, RuntimeClass> runtimeMap; @@ -836,8 +855,10 @@ private void addConstructor(String ownerStructName, Whitelist.Constructor whitel " with constructor parameters " + whitelistConstructor.painlessParameterTypeNames); } - painlessConstructor = new Method("", ownerStruct, null, getTypeInternal("void"), painlessParametersTypes, - asmConstructor, javaConstructor.getModifiers(), javaHandle); + painlessConstructor = methodCache.computeIfAbsent(buildMethodCacheKey(ownerStruct.name, "", painlessParametersTypes), + key -> new Method("", ownerStruct, null, getTypeInternal("void"), painlessParametersTypes, + asmConstructor, javaConstructor.getModifiers(), javaHandle)); + ownerStruct.constructors.put(painlessMethodKey, painlessConstructor); } else if (painlessConstructor.arguments.equals(painlessParametersTypes) == false){ throw new IllegalArgumentException( @@ -859,7 +880,7 @@ private void addMethod(ClassLoader whitelistClassLoader, String ownerStructName, " [" + whitelistMethod.javaMethodName + "] for owner struct [" + ownerStructName + "]."); } - Class javaAugmentedClass = null; + Class javaAugmentedClass; if (whitelistMethod.javaAugmentedClassName != null) { try { @@ -869,6 +890,8 @@ private void addMethod(ClassLoader whitelistClassLoader, String ownerStructName, "not found for method with name [" + whitelistMethod.javaMethodName + "] " + "and parameters " + whitelistMethod.painlessParameterTypeNames, cnfe); } + } else { + javaAugmentedClass = null; } int augmentedOffset = javaAugmentedClass == null ? 
0 : 1; @@ -939,8 +962,10 @@ private void addMethod(ClassLoader whitelistClassLoader, String ownerStructName, "[" + whitelistMethod.javaMethodName + "] and parameters " + whitelistMethod.painlessParameterTypeNames); } - painlessMethod = new Method(whitelistMethod.javaMethodName, ownerStruct, null, painlessReturnType, - painlessParametersTypes, asmMethod, javaMethod.getModifiers(), javaMethodHandle); + painlessMethod = methodCache.computeIfAbsent( + buildMethodCacheKey(ownerStruct.name, whitelistMethod.javaMethodName, painlessParametersTypes), + key -> new Method(whitelistMethod.javaMethodName, ownerStruct, null, painlessReturnType, painlessParametersTypes, + asmMethod, javaMethod.getModifiers(), javaMethodHandle)); ownerStruct.staticMethods.put(painlessMethodKey, painlessMethod); } else if ((painlessMethod.name.equals(whitelistMethod.javaMethodName) && painlessMethod.rtn.equals(painlessReturnType) && painlessMethod.arguments.equals(painlessParametersTypes)) == false) { @@ -963,8 +988,10 @@ private void addMethod(ClassLoader whitelistClassLoader, String ownerStructName, "[" + whitelistMethod.javaMethodName + "] and parameters " + whitelistMethod.painlessParameterTypeNames); } - painlessMethod = new Method(whitelistMethod.javaMethodName, ownerStruct, javaAugmentedClass, painlessReturnType, - painlessParametersTypes, asmMethod, javaMethod.getModifiers(), javaMethodHandle); + painlessMethod = methodCache.computeIfAbsent( + buildMethodCacheKey(ownerStruct.name, whitelistMethod.javaMethodName, painlessParametersTypes), + key -> new Method(whitelistMethod.javaMethodName, ownerStruct, javaAugmentedClass, painlessReturnType, + painlessParametersTypes, asmMethod, javaMethod.getModifiers(), javaMethodHandle)); ownerStruct.methods.put(painlessMethodKey, painlessMethod); } else if ((painlessMethod.name.equals(whitelistMethod.javaMethodName) && painlessMethod.rtn.equals(painlessReturnType) && painlessMethod.arguments.equals(painlessParametersTypes)) == false) { @@ -1016,33 +1043,40 @@ private void addField(String ownerStructName, Whitelist.Field whitelistField) { Field painlessField = ownerStruct.staticMembers.get(whitelistField.javaFieldName); if (painlessField == null) { - painlessField = new Field(whitelistField.javaFieldName, javaField.getName(), - ownerStruct, painlessFieldType, javaField.getModifiers(), null, null); + painlessField = fieldCache.computeIfAbsent( + buildFieldCacheKey(ownerStruct.name, whitelistField.javaFieldName, painlessFieldType.name), + key -> new Field(whitelistField.javaFieldName, javaField.getName(), + ownerStruct, painlessFieldType, javaField.getModifiers(), null, null)); ownerStruct.staticMembers.put(whitelistField.javaFieldName, painlessField); } else if (painlessField.type.equals(painlessFieldType) == false) { throw new IllegalArgumentException("illegal duplicate static fields [" + whitelistField.javaFieldName + "] " + "found within the struct [" + ownerStruct.name + "] with type [" + whitelistField.painlessFieldTypeName + "]"); } } else { - MethodHandle javaMethodHandleGetter = null; - MethodHandle javaMethodHandleSetter = null; + MethodHandle javaMethodHandleGetter; + MethodHandle javaMethodHandleSetter; try { if (Modifier.isStatic(javaField.getModifiers()) == false) { javaMethodHandleGetter = MethodHandles.publicLookup().unreflectGetter(javaField); javaMethodHandleSetter = MethodHandles.publicLookup().unreflectSetter(javaField); + } else { + javaMethodHandleGetter = null; + javaMethodHandleSetter = null; } } catch (IllegalAccessException exception) { throw new 
IllegalArgumentException("getter/setter [" + whitelistField.javaFieldName + "]" + " not found for class [" + ownerStruct.clazz.getName() + "]."); } - Field painlessField = ownerStruct.staticMembers.get(whitelistField.javaFieldName); + Field painlessField = ownerStruct.members.get(whitelistField.javaFieldName); if (painlessField == null) { - painlessField = new Field(whitelistField.javaFieldName, javaField.getName(), - ownerStruct, painlessFieldType, javaField.getModifiers(), javaMethodHandleGetter, javaMethodHandleSetter); - ownerStruct.staticMembers.put(whitelistField.javaFieldName, painlessField); + painlessField = fieldCache.computeIfAbsent( + buildFieldCacheKey(ownerStruct.name, whitelistField.javaFieldName, painlessFieldType.name), + key -> new Field(whitelistField.javaFieldName, javaField.getName(), + ownerStruct, painlessFieldType, javaField.getModifiers(), javaMethodHandleGetter, javaMethodHandleSetter)); + ownerStruct.members.put(whitelistField.javaFieldName, painlessField); } else if (painlessField.type.equals(painlessFieldType) == false) { throw new IllegalArgumentException("illegal duplicate member fields [" + whitelistField.javaFieldName + "] " + "found within the struct [" + ownerStruct.name + "] with type [" + whitelistField.painlessFieldTypeName + "]"); From a85772cbe5cf55719281647370a09babe44b49ca Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Mon, 8 Jan 2018 21:47:22 -0500 Subject: [PATCH 16/16] Use Gradle wrapper when building BWC This commit modifies the BWC build to invoke the Gradle wrapper. The motivation for this is two-fold: - BWC versions might be dependent on a different version of Gradle than the current version of Gradle - in a follow-up we are going to need to be able to set JAVA_HOME to a different value than the current value of JAVA_HOME Relates #28138 --- distribution/bwc/build.gradle | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index a2e88dc38a511..4c6323c337e16 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -115,17 +115,34 @@ if (project.hasProperty('bwcVersion')) { File bwcDeb = file("${checkoutDir}/distribution/deb/build/distributions/elasticsearch-${bwcVersion}.deb") File bwcRpm = file("${checkoutDir}/distribution/rpm/build/distributions/elasticsearch-${bwcVersion}.rpm") File bwcZip = file("${checkoutDir}/distribution/zip/build/distributions/elasticsearch-${bwcVersion}.zip") - task buildBwcVersion(type: GradleBuild) { + task buildBwcVersion(type: Exec) { dependsOn checkoutBwcBranch, writeBuildMetadata - dir = checkoutDir - tasks = [':distribution:deb:assemble', ':distribution:rpm:assemble', ':distribution:zip:assemble'] - startParameter.systemPropertiesArgs = ['build.snapshot': System.getProperty("build.snapshot") ?: "true"] + workingDir = checkoutDir + executable = new File(checkoutDir, 'gradlew').toString() + final ArrayList commandLineArgs = [ + ":distribution:deb:assemble", + ":distribution:rpm:assemble", + ":distribution:zip:assemble", + "-Dbuild.snapshot=${System.getProperty('build.snapshot') ?: 'true'}"] + final LogLevel logLevel = gradle.startParameter.logLevel + if ([LogLevel.QUIET, LogLevel.WARN, LogLevel.INFO, LogLevel.DEBUG].contains(logLevel)) { + commandLineArgs << "--${logLevel.name().toLowerCase(Locale.ENGLISH)}" + } + final String showStacktraceName = gradle.startParameter.showStacktrace.name() + assert ["INTERNAL_EXCEPTIONS", "ALWAYS", "ALWAYS_FULL"].contains(showStacktraceName) + if 
(showStacktraceName.equals("ALWAYS")) { + commandLineArgs << "--stacktrace" + } else if (showStacktraceName.equals("ALWAYS_FULL")) { + commandLineArgs << "--full-stacktrace" + } + args = commandLineArgs doLast { List missing = [bwcDeb, bwcRpm, bwcZip].grep { file -> - false == file.exists() } + false == file.exists() + } if (false == missing.empty) { throw new InvalidUserDataException( - "Building bwc version didn't generate expected files ${missing}") + "Building bwc version didn't generate expected files ${missing}") } } }
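For reference, the new buildBwcVersion task shells out to the wrapper in the BWC checkout instead of nesting a GradleBuild invocation. Under default logging and stacktrace settings, the command line it assembles is roughly the following, run from the checkout directory (a matching --quiet/--warn/--info/--debug flag and --stacktrace/--full-stacktrace are appended only when the outer build was started with them):

    # Approximate invocation performed by the Exec task above.
    ./gradlew :distribution:deb:assemble :distribution:rpm:assemble :distribution:zip:assemble -Dbuild.snapshot=true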