From f539e8e9951c947c80b77af41a9158099cef02d6 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 24 Jul 2019 16:09:51 +0100 Subject: [PATCH 01/51] Fix testFirstListElementsToCommaDelimitedStringReportsFirstElementsIfLong (#44785) This test can fail (super-rarely) if it generates a list of length 11 containing a duplicate, because the `.distinct()` reduces the list length to 10 and then it is not abbreviated any more. This change generalises the test to cover lists of any random length. --- .../routing/allocation/AllocationServiceTests.java | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationServiceTests.java index 6f6b6bb39bd9a..5dcdeda90e2c2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationServiceTests.java @@ -49,8 +49,8 @@ private void assertAllElementsReported(List<String> strings, boolean isDebugEnab } public void testFirstListElementsToCommaDelimitedStringReportsFirstElementsIfLong() { - List<String> strings = IntStream.range(0, between(11, 100)).mapToObj(i -> randomAlphaOfLength(10)) .distinct().collect(Collectors.toList()); + List<String> strings = IntStream.range(0, between(0, 100)) .mapToObj(i -> randomAlphaOfLength(between(6, 10))).distinct().collect(Collectors.toList()); final String abbreviated = AllocationService.firstListElementsToCommaDelimitedString(strings, Function.identity(), false); for (int i = 0; i < strings.size(); i++) { if (i < 10) { @@ -59,8 +59,13 @@ public void testFirstListElementsToCommaDelimitedStringReportsFirstElementsIfLon assertThat(abbreviated, not(containsString(strings.get(i)))); } } - assertThat(abbreviated, containsString("...")); - assertThat(abbreviated, containsString("[" + strings.size() + " items in total]")); + + if (strings.size() > 10) { + assertThat(abbreviated, containsString("...")); + assertThat(abbreviated, containsString("[" + strings.size() + " items in total]")); + } else { + assertThat(abbreviated, not(containsString("..."))); + } } public void testFirstListElementsToCommaDelimitedStringUsesFormatterNotToString() { From e5cc3eb4040950296694e396d1a71702661e0a5a Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 24 Jul 2019 17:35:21 +0200 Subject: [PATCH 02/51] Cleanup Dead Code in Index Creation (#44784) * Cleanup Dead Code in Index Creation * This is all unused and the state of a create request is always `OPEN` --- .../CreateIndexClusterStateUpdateRequest.java | 25 +------------- .../create/TransportCreateIndexAction.java | 2 +- .../rollover/TransportRolloverAction.java | 2 +- .../indices/shrink/TransportResizeAction.java | 2 +- .../metadata/MetaDataCreateIndexService.java | 33 +++++++------------ .../snapshots/RestoreService.java | 5 +-- .../metadata/IndexCreationTaskTests.java | 8 +++-- 7 files changed, 22 insertions(+), 55 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java index 25f7f33647c25..f9449bae8bef3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java @@ -27,7 +27,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; -import org.elasticsearch.transport.TransportMessage; import java.util.HashMap; import java.util.HashSet; @@ -39,7 +38,6 @@ */ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequest<CreateIndexClusterStateUpdateRequest> { - private final TransportMessage originalMessage; private final String cause; private final String index; private final String providedName; @@ -47,8 +45,6 @@ public class CreateIndexClusterStateUpdateRequ private ResizeType resizeType; private boolean copySettings; - private IndexMetaData.State state = IndexMetaData.State.OPEN; - private Settings settings = Settings.Builder.EMPTY_SETTINGS; private final Map<String, String> mappings = new HashMap<>(); @@ -59,8 +55,7 @@ public class CreateIndexClusterStateUpdateRequ private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; - public CreateIndexClusterStateUpdateRequest(TransportMessage originalMessage, String cause, String index, String providedName) { - this.originalMessage = originalMessage; + public CreateIndexClusterStateUpdateRequest(String cause, String index, String providedName) { this.cause = cause; this.index = index; this.providedName = providedName; @@ -81,16 +76,6 @@ public CreateIndexClusterStateUpdateRequest aliases(Set<Alias> aliases) { return this; } - public CreateIndexClusterStateUpdateRequest blocks(Set<ClusterBlock> blocks) { this.blocks.addAll(blocks); return this; } - - public CreateIndexClusterStateUpdateRequest state(IndexMetaData.State state) { this.state = state; return this; } - public CreateIndexClusterStateUpdateRequest recoverFrom(Index recoverFrom) { this.recoverFrom = recoverFrom; return this; } @@ -111,10 +96,6 @@ public CreateIndexClusterStateUpdateRequest copySettings(final boolean copySetti return this; } - public TransportMessage originalMessage() { return originalMessage; } - public String cause() { return cause; } @@ -123,10 +104,6 @@ public String index() { return index; } - public IndexMetaData.State state() { return state; } - public Settings settings() { return settings; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java index 92d897e3f0348..3c779002f9197 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -78,7 +78,7 @@ protected void masterOperation(Task task, final CreateIndexRequest request, fina final String indexName = indexNameExpressionResolver.resolveDateMathExpression(request.index()); final CreateIndexClusterStateUpdateRequest updateRequest = - new CreateIndexClusterStateUpdateRequest(request, cause, indexName, request.index()) + new CreateIndexClusterStateUpdateRequest(cause, indexName, request.index()) .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) .settings(request.settings()).mappings(request.mappings()) .aliases(request.aliases()) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java
b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index a2ecc9e678b6d..69a4f87e64f92 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -289,7 +289,7 @@ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final Stri final CreateIndexRequest createIndexRequest = rolloverRequest.getCreateIndexRequest(); createIndexRequest.cause("rollover_index"); createIndexRequest.index(targetIndexName); - return new CreateIndexClusterStateUpdateRequest(createIndexRequest, + return new CreateIndexClusterStateUpdateRequest( "rollover_index", targetIndexName, providedIndexName) .ackTimeout(createIndexRequest.timeout()) .masterNodeTimeout(createIndexRequest.masterNodeTimeout()) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java index bbbd366ba1b5a..a64b6e65d72d1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java @@ -179,7 +179,7 @@ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final Resi settingsBuilder.put("index.number_of_shards", numShards); targetIndex.settings(settingsBuilder); - return new CreateIndexClusterStateUpdateRequest(targetIndex, cause, targetIndex.index(), targetIndexName) + return new CreateIndexClusterStateUpdateRequest(cause, targetIndex.index(), targetIndexName) // mappings are updated on the node when creating in the shards, this prevents race-conditions since all mapping must be // applied once we took the snapshot and if somebody messes things up and switches the index read/write and adds docs we // miss the mappings for everything is corrupted and hard to debug diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 3b500056dfbd9..863871314060e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -39,7 +39,6 @@ import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.block.ClusterBlocks; -import org.elasticsearch.cluster.metadata.IndexMetaData.State; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.RoutingTable; @@ -294,8 +293,6 @@ public ClusterState execute(ClusterState currentState) throws Exception { List<IndexTemplateMetaData> templates = MetaDataIndexTemplateService.findTemplates(currentState.metaData(), request.index()); - Map<String, Map<String, String>> customs = new HashMap<>(); - // add the request mapping Map<String, Map<String, Object>> mappings = new HashMap<>(); @@ -541,11 +538,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { indexMetaDataBuilder.putAlias(aliasMetaData); } - for (Map.Entry<String, Map<String, String>> customEntry : customs.entrySet()) { - indexMetaDataBuilder.putCustom(customEntry.getKey(), customEntry.getValue()); - } - - indexMetaDataBuilder.state(request.state()); +
indexMetaDataBuilder.state(IndexMetaData.State.OPEN); final IndexMetaData indexMetaData; try { @@ -576,13 +569,11 @@ public ClusterState execute(ClusterState currentState) throws Exception { ClusterState updatedState = ClusterState.builder(currentState).blocks(blocks).metaData(newMetaData).build(); - if (request.state() == State.OPEN) { - RoutingTable.Builder routingTableBuilder = RoutingTable.builder(updatedState.routingTable()) - .addAsNew(updatedState.metaData().index(request.index())); - updatedState = allocationService.reroute( - ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build(), - "index [" + request.index() + "] created"); - } + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(updatedState.routingTable()) + .addAsNew(updatedState.metaData().index(request.index())); + updatedState = allocationService.reroute( + ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build(), + "index [" + request.index() + "] created"); removalExtraInfo = "cleaning up after validating index on master"; removalReason = IndexRemovalReason.NO_LONGER_ASSIGNED; return updatedState; @@ -611,11 +602,11 @@ public void onFailure(String source, Exception e) { private void validate(CreateIndexClusterStateUpdateRequest request, ClusterState state) { validateIndexName(request.index(), state); - validateIndexSettings(request.index(), request.settings(), state, forbidPrivateIndexSettings); + validateIndexSettings(request.index(), request.settings(), forbidPrivateIndexSettings); } - public void validateIndexSettings(String indexName, final Settings settings, final ClusterState clusterState, - final boolean forbidPrivateIndexSettings) throws IndexCreationException { + public void validateIndexSettings(String indexName, final Settings settings, final boolean forbidPrivateIndexSettings) + throws IndexCreationException { List<String> validationErrors = getIndexSettingsValidationErrors(settings, forbidPrivateIndexSettings); if (validationErrors.isEmpty() == false) { @@ -714,9 +705,9 @@ static void validateSplitIndex(ClusterState state, String sourceIndex, IndexMetaData.selectSplitShard(0, sourceMetaData, IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings)); } - static IndexMetaData validateResize(ClusterState state, String sourceIndex, - Set<String> targetIndexMappingsTypes, String targetIndexName, - Settings targetIndexSettings) { + private static IndexMetaData validateResize(ClusterState state, String sourceIndex, + Set<String> targetIndexMappingsTypes, String targetIndexName, + Settings targetIndexSettings) { if (state.metaData().hasIndex(targetIndexName)) { throw new ResourceAlreadyExistsException(state.metaData().index(targetIndexName).getIndex()); } diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index ad346426333c5..5f3bc91a9978e 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -266,10 +266,7 @@ public ClusterState execute(ClusterState currentState) { // Index doesn't exist - create it and start recovery // Make sure that the index we are about to create has a validate name MetaDataCreateIndexService.validateIndexName(renamedIndexName, currentState); - createIndexService.validateIndexSettings(renamedIndexName, - snapshotIndexMetaData.getSettings(), - currentState, - false); + createIndexService.validateIndexSettings(renamedIndexName,
snapshotIndexMetaData.getSettings(), false); IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData) .state(IndexMetaData.State.OPEN) .index(renamedIndexName); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java index 691e23bb87ad1..d4d3643482820 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java @@ -55,6 +55,7 @@ import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; import org.mockito.ArgumentCaptor; +import org.mockito.stubbing.Answer; import java.io.IOException; import java.util.Arrays; @@ -71,7 +72,9 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.startsWith; +import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyObject; +import static org.mockito.Matchers.anyString; import static org.mockito.Mockito.anyMap; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.eq; @@ -236,10 +239,7 @@ public void testTemplateOrder2() throws Exception { } public void testRequestStateOpen() throws Exception { - when(request.state()).thenReturn(IndexMetaData.State.OPEN); - executeTask(); - verify(allocationService, times(1)).reroute(anyObject(), anyObject()); } @@ -491,5 +491,7 @@ private void setupIndicesService() throws Exception { when(service.getIndexEventListener()).thenReturn(mock(IndexEventListener.class)); when(indicesService.createIndex(anyObject(), anyObject())).thenReturn(service); + when(allocationService.reroute(any(ClusterState.class), anyString())).thenAnswer( + (Answer<ClusterState>) invocationOnMock -> (ClusterState) invocationOnMock.getArguments()[0]); } } From 49825cff6df21ef4644d6c17f69b04f01043192b Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 24 Jul 2019 17:43:18 +0200 Subject: [PATCH 03/51] Close connection manager on current thread in RemoteClusterConnection (#44805) The problem is that RemoteClusterConnection closes the connection manager asynchronously, which races with the threadpool being shut down at the end of the test.
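As a minimal sketch of the race, assuming plain java.util.concurrent types in place of Elasticsearch's ThreadPool (the class and variable names below are illustrative, not Elasticsearch code): an asynchronous close only queues the teardown work, so a pool that is shut down concurrently can drop the task before it runs.

    // Illustrative sketch only, not Elasticsearch code: models the old
    // close-on-generic-pool behaviour with a plain ExecutorService.
    import java.util.List;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class AsyncCloseRace {
        public static void main(String[] args) {
            ExecutorService generic = Executors.newSingleThreadExecutor();

            // Old behaviour: close() merely queues the teardown work on the
            // shared pool and returns immediately.
            generic.execute(() -> System.out.println("connection manager closed"));

            // Test teardown shuts the pool down concurrently; any queued task
            // that has not started yet is returned here instead of being run,
            // so the connection manager is sometimes never closed.
            List<Runnable> dropped = generic.shutdownNow();
            System.out.println("tasks dropped at teardown: " + dropped.size());
        }
    }

Closing the ConnectionManager on the current thread removes this handoff: closeNoBlock() skips only the wait on the close latch, which is safe because the connected-node cleanup now happens in the ref-counter's closeInternal().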
Closes #44339 Closes #44610 --- .../transport/ConnectionManager.java | 34 ++++++++++++------- .../transport/RemoteClusterConnection.java | 3 +- .../RemoteClusterConnectionTests.java | 10 +----- x-pack/plugin/ccr/build.gradle | 6 ---- 4 files changed, 24 insertions(+), 29 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java b/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java index ed26d0b07cdba..f8db0d96c5416 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java +++ b/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java @@ -58,6 +58,15 @@ public class ConnectionManager implements Closeable { private final AbstractRefCounted connectingRefCounter = new AbstractRefCounted("connection manager") { @Override protected void closeInternal() { + Iterator<Map.Entry<DiscoveryNode, Transport.Connection>> iterator = connectedNodes.entrySet().iterator(); + while (iterator.hasNext()) { + Map.Entry<DiscoveryNode, Transport.Connection> next = iterator.next(); + try { + IOUtils.closeWhileHandlingException(next.getValue()); + } finally { + iterator.remove(); + } + } closeLatch.countDown(); } }; @@ -249,22 +258,23 @@ public Set<DiscoveryNode> connectedNodes() { @Override public void close() { + internalClose(true); + } + + public void closeNoBlock() { + internalClose(false); + } + + private void internalClose(boolean waitForPendingConnections) { assert Transports.assertNotTransportThread("Closing ConnectionManager"); if (closing.compareAndSet(false, true)) { connectingRefCounter.decRef(); - try { - closeLatch.await(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new IllegalStateException(e); - } - Iterator<Map.Entry<DiscoveryNode, Transport.Connection>> iterator = connectedNodes.entrySet().iterator(); - while (iterator.hasNext()) { - Map.Entry<DiscoveryNode, Transport.Connection> next = iterator.next(); + if (waitForPendingConnections) { try { - IOUtils.closeWhileHandlingException(next.getValue()); - } finally { - iterator.remove(); + closeLatch.await(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new IllegalStateException(e); } } } diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index d0c26aad54cbc..731aa81179cb8 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -332,8 +332,7 @@ Transport.Connection getConnection() { @Override public void close() throws IOException { IOUtils.close(connectHandler); - // In the ConnectionManager we wait on connections being closed.
- threadPool.generic().execute(connectionManager::close); + connectionManager.closeNoBlock(); } public boolean isClosed() { diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index 37b05dbe128ea..305f8ddc79de3 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -64,7 +64,6 @@ import org.elasticsearch.test.transport.StubbableTransport; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; -import org.junit.Before; import java.io.IOException; import java.net.InetAddress; @@ -94,6 +93,7 @@ import static java.util.Collections.emptySet; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.iterableWithSize; @@ -101,7 +101,6 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.sameInstance; import static org.hamcrest.Matchers.startsWith; -import static org.hamcrest.Matchers.endsWith; public class RemoteClusterConnectionTests extends ESTestCase { @@ -114,13 +113,6 @@ public void tearDown() throws Exception { ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); } - @Override - @Before - public void setUp() throws Exception { - super.setUp(); - assumeFalse("https://github.com/elastic/elasticsearch/issues/44339", System.getProperty("os.name").contains("Win")); - } - private MockTransportService startTransport(String id, List<DiscoveryNode> knownNodes, Version version) { return startTransport(id, knownNodes, version, threadPool); } diff --git a/x-pack/plugin/ccr/build.gradle b/x-pack/plugin/ccr/build.gradle index 4b6c33f56ba09..dfc3b85dfe111 100644 --- a/x-pack/plugin/ccr/build.gradle +++ b/x-pack/plugin/ccr/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.OS - evaluationDependsOn(xpackModule('core')) apply plugin: 'elasticsearch.esplugin' @@ -24,8 +22,6 @@ task internalClusterTestNoSecurityManager(type: Test) { include noSecurityManagerITClasses systemProperty 'es.set.netty.runtime.available.processors', 'false' systemProperty 'tests.security.manager', 'false' - // Disable tests on windows https://github.com/elastic/elasticsearch/issues/44610 - onlyIf { OS.WINDOWS.equals(OS.current()) == false } } // Instead we create a separate task to run the @@ -38,8 +34,6 @@ task internalClusterTest(type: Test) { include '**/*IT.class' exclude noSecurityManagerITClasses systemProperty 'es.set.netty.runtime.available.processors', 'false' - // Disable tests on windows https://github.com/elastic/elasticsearch/issues/44610 - onlyIf { OS.WINDOWS.equals(OS.current()) == false } } check.dependsOn internalClusterTest From c36f5853d10ce13ebe6c58e535a7075bb8dd9db8 Mon Sep 17 00:00:00 2001 From: Enrico Zimuel Date: Wed, 24 Jul 2019 18:04:16 +0200 Subject: [PATCH 04/51] Fix URL documentation in API specs (#44487) --- rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json | 2 +- .../src/main/resources/rest-api-spec/api/cat.aliases.json | 2 +- .../src/main/resources/rest-api-spec/api/cat.allocation.json | 2 +- .../src/main/resources/rest-api-spec/api/cat.count.json | 2 +- .../src/main/resources/rest-api-spec/api/cat.fielddata.json | 2 +-
.../src/main/resources/rest-api-spec/api/cat.health.json | 2 +- .../src/main/resources/rest-api-spec/api/cat.help.json | 2 +- .../src/main/resources/rest-api-spec/api/cat.indices.json | 2 +- .../src/main/resources/rest-api-spec/api/cat.master.json | 2 +- .../src/main/resources/rest-api-spec/api/cat.nodeattrs.json | 2 +- .../src/main/resources/rest-api-spec/api/cat.nodes.json | 2 +- .../src/main/resources/rest-api-spec/api/cat.pending_tasks.json | 2 +- .../src/main/resources/rest-api-spec/api/cat.plugins.json | 2 +- .../src/main/resources/rest-api-spec/api/cat.recovery.json | 2 +- .../src/main/resources/rest-api-spec/api/cat.repositories.json | 2 +- .../src/main/resources/rest-api-spec/api/cat.segments.json | 2 +- .../src/main/resources/rest-api-spec/api/cat.shards.json | 2 +- .../src/main/resources/rest-api-spec/api/cat.snapshots.json | 2 +- .../src/main/resources/rest-api-spec/api/cat.tasks.json | 2 +- .../src/main/resources/rest-api-spec/api/cat.templates.json | 2 +- .../src/main/resources/rest-api-spec/api/cat.thread_pool.json | 2 +- .../src/main/resources/rest-api-spec/api/clear_scroll.json | 2 +- .../resources/rest-api-spec/api/cluster.allocation_explain.json | 2 +- .../main/resources/rest-api-spec/api/cluster.get_settings.json | 2 +- .../src/main/resources/rest-api-spec/api/cluster.health.json | 2 +- .../main/resources/rest-api-spec/api/cluster.pending_tasks.json | 2 +- .../main/resources/rest-api-spec/api/cluster.put_settings.json | 2 +- .../main/resources/rest-api-spec/api/cluster.remote_info.json | 2 +- .../src/main/resources/rest-api-spec/api/cluster.reroute.json | 2 +- .../src/main/resources/rest-api-spec/api/cluster.state.json | 2 +- .../src/main/resources/rest-api-spec/api/cluster.stats.json | 2 +- rest-api-spec/src/main/resources/rest-api-spec/api/count.json | 2 +- rest-api-spec/src/main/resources/rest-api-spec/api/create.json | 2 +- rest-api-spec/src/main/resources/rest-api-spec/api/delete.json | 2 +- .../src/main/resources/rest-api-spec/api/delete_script.json | 2 +- rest-api-spec/src/main/resources/rest-api-spec/api/exists.json | 2 +- .../src/main/resources/rest-api-spec/api/exists_source.json | 2 +- rest-api-spec/src/main/resources/rest-api-spec/api/explain.json | 2 +- .../src/main/resources/rest-api-spec/api/field_caps.json | 2 +- rest-api-spec/src/main/resources/rest-api-spec/api/get.json | 2 +- .../src/main/resources/rest-api-spec/api/get_script.json | 2 +- .../src/main/resources/rest-api-spec/api/get_source.json | 2 +- rest-api-spec/src/main/resources/rest-api-spec/api/index.json | 2 +- .../src/main/resources/rest-api-spec/api/indices.analyze.json | 2 +- .../main/resources/rest-api-spec/api/indices.clear_cache.json | 2 +- .../src/main/resources/rest-api-spec/api/indices.close.json | 2 +- .../src/main/resources/rest-api-spec/api/indices.create.json | 2 +- .../src/main/resources/rest-api-spec/api/indices.delete.json | 2 +- .../main/resources/rest-api-spec/api/indices.delete_alias.json | 2 +- .../resources/rest-api-spec/api/indices.delete_template.json | 2 +- .../src/main/resources/rest-api-spec/api/indices.exists.json | 2 +- .../main/resources/rest-api-spec/api/indices.exists_alias.json | 2 +- .../resources/rest-api-spec/api/indices.exists_template.json | 2 +- .../main/resources/rest-api-spec/api/indices.exists_type.json | 2 +- .../src/main/resources/rest-api-spec/api/indices.flush.json | 2 +- .../main/resources/rest-api-spec/api/indices.forcemerge.json | 2 +- .../src/main/resources/rest-api-spec/api/indices.get.json | 2 +- 
.../src/main/resources/rest-api-spec/api/indices.get_alias.json | 2 +- .../resources/rest-api-spec/api/indices.get_field_mapping.json | 2 +- .../main/resources/rest-api-spec/api/indices.get_mapping.json | 2 +- .../main/resources/rest-api-spec/api/indices.get_settings.json | 2 +- .../main/resources/rest-api-spec/api/indices.get_template.json | 2 +- .../main/resources/rest-api-spec/api/indices.get_upgrade.json | 2 +- .../src/main/resources/rest-api-spec/api/indices.open.json | 2 +- .../src/main/resources/rest-api-spec/api/indices.put_alias.json | 2 +- .../main/resources/rest-api-spec/api/indices.put_mapping.json | 2 +- .../main/resources/rest-api-spec/api/indices.put_settings.json | 2 +- .../main/resources/rest-api-spec/api/indices.put_template.json | 2 +- .../src/main/resources/rest-api-spec/api/indices.recovery.json | 2 +- .../src/main/resources/rest-api-spec/api/indices.refresh.json | 2 +- .../src/main/resources/rest-api-spec/api/indices.rollover.json | 2 +- .../src/main/resources/rest-api-spec/api/indices.segments.json | 2 +- .../main/resources/rest-api-spec/api/indices.shard_stores.json | 2 +- .../src/main/resources/rest-api-spec/api/indices.shrink.json | 2 +- .../src/main/resources/rest-api-spec/api/indices.split.json | 2 +- .../src/main/resources/rest-api-spec/api/indices.stats.json | 2 +- .../resources/rest-api-spec/api/indices.update_aliases.json | 2 +- .../src/main/resources/rest-api-spec/api/indices.upgrade.json | 2 +- .../resources/rest-api-spec/api/indices.validate_query.json | 2 +- rest-api-spec/src/main/resources/rest-api-spec/api/info.json | 2 +- rest-api-spec/src/main/resources/rest-api-spec/api/mget.json | 2 +- rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json | 2 +- .../src/main/resources/rest-api-spec/api/msearch_template.json | 2 +- .../src/main/resources/rest-api-spec/api/mtermvectors.json | 2 +- .../src/main/resources/rest-api-spec/api/nodes.hot_threads.json | 2 +- .../src/main/resources/rest-api-spec/api/nodes.info.json | 2 +- .../src/main/resources/rest-api-spec/api/nodes.stats.json | 2 +- .../src/main/resources/rest-api-spec/api/nodes.usage.json | 2 +- rest-api-spec/src/main/resources/rest-api-spec/api/ping.json | 2 +- .../src/main/resources/rest-api-spec/api/put_script.json | 2 +- .../resources/rest-api-spec/api/render_search_template.json | 2 +- rest-api-spec/src/main/resources/rest-api-spec/api/search.json | 2 +- .../src/main/resources/rest-api-spec/api/search_shards.json | 2 +- .../src/main/resources/rest-api-spec/api/search_template.json | 2 +- .../src/main/resources/rest-api-spec/api/snapshot.create.json | 2 +- .../resources/rest-api-spec/api/snapshot.create_repository.json | 2 +- .../src/main/resources/rest-api-spec/api/snapshot.delete.json | 2 +- .../resources/rest-api-spec/api/snapshot.delete_repository.json | 2 +- .../src/main/resources/rest-api-spec/api/snapshot.get.json | 2 +- .../resources/rest-api-spec/api/snapshot.get_repository.json | 2 +- .../src/main/resources/rest-api-spec/api/snapshot.restore.json | 2 +- .../src/main/resources/rest-api-spec/api/snapshot.status.json | 2 +- .../resources/rest-api-spec/api/snapshot.verify_repository.json | 2 +- .../src/main/resources/rest-api-spec/api/tasks.cancel.json | 2 +- .../src/main/resources/rest-api-spec/api/tasks.get.json | 2 +- .../src/main/resources/rest-api-spec/api/tasks.list.json | 2 +- .../src/main/resources/rest-api-spec/api/termvectors.json | 2 +- rest-api-spec/src/main/resources/rest-api-spec/api/update.json | 2 +- 108 files changed, 108 insertions(+), 108 deletions(-) diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json b/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json index e02aa1b0d611a..054e182b0ba9e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json @@ -1,6 +1,6 @@ { "bulk": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/docs-bulk.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-bulk.html", "stability": "stable", "methods": ["POST", "PUT"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json index 2c89c1dcdd15d..a1e7a57395852 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json @@ -1,6 +1,6 @@ { "cat.aliases": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cat-alias.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-alias.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.allocation.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.allocation.json index 711a894094277..45d4a289e251d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.allocation.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.allocation.json @@ -1,6 +1,6 @@ { "cat.allocation": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cat-allocation.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-allocation.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.count.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.count.json index 7317e4866a255..43dd51ce5d993 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.count.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.count.json @@ -1,6 +1,6 @@ { "cat.count": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cat-count.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-count.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.fielddata.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.fielddata.json index acba8e58668cb..b623354ccc03e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.fielddata.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.fielddata.json @@ -1,6 +1,6 @@ { "cat.fielddata": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cat-fielddata.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-fielddata.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.health.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.health.json index 36be2f1afedd8..efc7e100ff0f8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.health.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.health.json @@ -1,6 +1,6 @@ { "cat.health": { - "documentation": 
"http://www.elastic.co/guide/en/elasticsearch/reference/master/cat-health.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-health.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.help.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.help.json index 16a935517aa23..3393f31981f5c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.help.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.help.json @@ -1,6 +1,6 @@ { "cat.help": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cat.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cat.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json index 3246b8f73ea21..726902c4f56c0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json @@ -1,6 +1,6 @@ { "cat.indices": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cat-indices.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-indices.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.master.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.master.json index a97d3a5ca2bdd..68903adadb0ed 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.master.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.master.json @@ -1,6 +1,6 @@ { "cat.master": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cat-master.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-master.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodeattrs.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodeattrs.json index 6a9ee3c424a57..d4b084931431e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodeattrs.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodeattrs.json @@ -1,6 +1,6 @@ { "cat.nodeattrs": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cat-nodeattrs.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-nodeattrs.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodes.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodes.json index 9e17bf14a42dc..c59c203f19704 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodes.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodes.json @@ -1,6 +1,6 @@ { "cat.nodes": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cat-nodes.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-nodes.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.pending_tasks.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.pending_tasks.json index 0fc951c424590..72094f8467630 
100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.pending_tasks.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.pending_tasks.json @@ -1,6 +1,6 @@ { "cat.pending_tasks": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cat-pending-tasks.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-pending-tasks.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.plugins.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.plugins.json index 3217ec0ce62f6..97b67a3041866 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.plugins.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.plugins.json @@ -1,6 +1,6 @@ { "cat.plugins": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cat-plugins.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-plugins.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.recovery.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.recovery.json index c592d8c23db40..b3433436dc9d4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.recovery.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.recovery.json @@ -1,6 +1,6 @@ { "cat.recovery": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cat-recovery.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-recovery.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.repositories.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.repositories.json index 0b006fc023cf4..d02d4c3a75944 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.repositories.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.repositories.json @@ -1,6 +1,6 @@ { "cat.repositories": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cat-repositories.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-repositories.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segments.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segments.json index 869b179a6cc7a..e9195471263d1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segments.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segments.json @@ -1,6 +1,6 @@ { "cat.segments": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cat-segments.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-segments.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.shards.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.shards.json index abf96f666e474..1e10d7a7b917e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.shards.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.shards.json @@ -1,6 +1,6 @@ { "cat.shards": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cat-shards.html", + "documentation": 
"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-shards.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.snapshots.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.snapshots.json index 1087537114c14..96d0657bb28cb 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.snapshots.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.snapshots.json @@ -1,6 +1,6 @@ { "cat.snapshots": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cat-snapshots.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-snapshots.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.tasks.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.tasks.json index 25b10a9147969..1c7901db5a35e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.tasks.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.tasks.json @@ -1,6 +1,6 @@ { "cat.tasks": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.templates.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.templates.json index 5bd6fd8b10273..cba6a9f1c6117 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.templates.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.templates.json @@ -1,6 +1,6 @@ { "cat.templates": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cat-templates.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-templates.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json index 056a1421d719f..2d0524c7f544c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json @@ -1,6 +1,6 @@ { "cat.thread_pool": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cat-thread-pool.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-thread-pool.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/clear_scroll.json b/rest-api-spec/src/main/resources/rest-api-spec/api/clear_scroll.json index 0274487544652..451b758f51334 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/clear_scroll.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/clear_scroll.json @@ -1,6 +1,6 @@ { "clear_scroll": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/search-request-body.html#request-body-search-scroll", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/search-request-body.html#_clear_scroll_api", "stability": "stable", "methods": ["DELETE"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.allocation_explain.json 
b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.allocation_explain.json index 7af79af09ea9b..44a1f8b36f76b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.allocation_explain.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.allocation_explain.json @@ -1,6 +1,6 @@ { "cluster.allocation_explain": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-allocation-explain.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-allocation-explain.html", "stability": "stable", "methods": ["GET", "POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json index 40c5a8d9f0eae..919f38dd45bee 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json @@ -1,6 +1,6 @@ { "cluster.get_settings": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-update-settings.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-update-settings.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json index 1ea7c37943931..cb8a362d925fc 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json @@ -1,6 +1,6 @@ { "cluster.health": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-health.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-health.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.pending_tasks.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.pending_tasks.json index 7b8b2dbcedf1d..479a754c25698 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.pending_tasks.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.pending_tasks.json @@ -1,6 +1,6 @@ { "cluster.pending_tasks": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-pending.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-pending.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_settings.json index 4681640f4b12f..8b81c73766536 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_settings.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_settings.json @@ -1,6 +1,6 @@ { "cluster.put_settings": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-update-settings.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-update-settings.html", "stability": "stable", "methods": ["PUT"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.remote_info.json 
b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.remote_info.json index 4fdb89b891056..60ac408a337a4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.remote_info.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.remote_info.json @@ -1,6 +1,6 @@ { "cluster.remote_info": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-remote-info.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-remote-info.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.reroute.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.reroute.json index c7d8ece64d8eb..057d5cc8cf81f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.reroute.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.reroute.json @@ -1,6 +1,6 @@ { "cluster.reroute": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-reroute.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-reroute.html", "stability": "stable", "methods": ["POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.state.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.state.json index cd7f40cd86017..6dbfbf019d040 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.state.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.state.json @@ -1,6 +1,6 @@ { "cluster.state": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-state.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-state.html", "stability" : "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.stats.json index 192cf7dcdf5ee..b54fd535eeb49 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.stats.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.stats.json @@ -1,6 +1,6 @@ { "cluster.stats": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-stats.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-stats.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/count.json b/rest-api-spec/src/main/resources/rest-api-spec/api/count.json index e115a0ae06fb4..7b290e20b7ff5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/count.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/count.json @@ -1,6 +1,6 @@ { "count": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/search-count.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/search-count.html", "stability": "stable", "methods": ["POST", "GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/create.json b/rest-api-spec/src/main/resources/rest-api-spec/api/create.json index d0e2375d0014b..6796b032898cd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/create.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/create.json @@ -1,6 +1,6 @@ { "create": { - "documentation": 
"http://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html", "stability": "stable", "methods": ["PUT","POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json index 27c7e62ddaad0..90134b5ccbdc7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json @@ -1,6 +1,6 @@ { "delete": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete.html", "stability": "stable", "methods": ["DELETE"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_script.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_script.json index d407aa0bfe694..a425f7ffd5b43 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_script.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_script.json @@ -1,6 +1,6 @@ { "delete_script": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html", "stability": "stable", "methods": ["DELETE"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json b/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json index 35112389aa392..cfbb11b86beeb 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json @@ -1,6 +1,6 @@ { "exists": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html", "stability": "stable", "methods": ["HEAD"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json b/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json index 7a34094100d02..902a56ae135a0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json @@ -1,6 +1,6 @@ { "exists_source": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html", "stability": "stable", "methods": ["HEAD"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json b/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json index 11f32801d4d19..d96d61f373bb6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json @@ -1,6 +1,6 @@ { "explain": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/search-explain.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/search-explain.html", "stability": "stable", "methods": ["GET", "POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/field_caps.json b/rest-api-spec/src/main/resources/rest-api-spec/api/field_caps.json index d41785615090b..e30471141c22f 
100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/field_caps.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/field_caps.json @@ -1,6 +1,6 @@ { "field_caps": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/search-field-caps.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/search-field-caps.html", "stability": "stable", "methods": ["GET", "POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get.json index f02ca3e4a4304..495db5ef0049b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get.json @@ -1,6 +1,6 @@ { "get": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get_script.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get_script.json index 6beaf8ae85a6c..6099d7baa0bbe 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/get_script.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get_script.json @@ -1,6 +1,6 @@ { "get_script": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json index 0c7414f818411..0653491d9bffe 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json @@ -1,6 +1,6 @@ { "get_source": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json index d85e0d709944c..e972df213a288 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json @@ -1,6 +1,6 @@ { "index": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html", "stability": "stable", "methods": ["POST", "PUT"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.analyze.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.analyze.json index be119620d6cdf..05e2ccfbaad6a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.analyze.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.analyze.json @@ -1,6 +1,6 @@ { "indices.analyze": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-analyze.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-analyze.html", "stability": "stable", "methods": 
["GET", "POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json index 9e591275927a8..5db4fcbf38885 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json @@ -1,6 +1,6 @@ { "indices.clear_cache": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clearcache.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clearcache.html", "stability": "stable", "methods": ["POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.close.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.close.json index 187cce0d07796..5af971362c3c2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.close.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.close.json @@ -1,6 +1,6 @@ { "indices.close": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-open-close.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-open-close.html", "stability": "stable", "methods": ["POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create.json index dfe3236c9c053..2f71f8f1d7fc4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create.json @@ -1,6 +1,6 @@ { "indices.create": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-index.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-index.html", "stability": "stable", "methods": ["PUT"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete.json index 4c2b7eaaa2fb2..8df0b7f70d97b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete.json @@ -1,6 +1,6 @@ { "indices.delete": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-index.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-index.html", "stability": "stable", "methods": ["DELETE"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_alias.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_alias.json index c6e4ec8295443..a2e04f2e24d71 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_alias.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_alias.json @@ -1,6 +1,6 @@ { "indices.delete_alias": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html", "stability": "stable", "methods": ["DELETE"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_template.json 
b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_template.json index 0b3758953b3fb..bbe0c9e6aeb72 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_template.json @@ -1,6 +1,6 @@ { "indices.delete_template": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html", "stability": "stable", "methods": ["DELETE"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists.json index b84f4d2cacbf3..08cba812b37f0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists.json @@ -1,6 +1,6 @@ { "indices.exists": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-exists.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-exists.html", "stability": "stable", "methods": [ "HEAD" ], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json index d95c423b66db5..c277842e5c002 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json @@ -1,6 +1,6 @@ { "indices.exists_alias": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html", "stability": "stable", "methods": ["HEAD"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_template.json index 8140c5748fb4d..2ffe6a7081a5f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_template.json @@ -1,6 +1,6 @@ { "indices.exists_template": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html", "stability": "stable", "methods": ["HEAD"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_type.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_type.json index 10b69b2647f82..1559abbee90ad 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_type.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_type.json @@ -1,6 +1,6 @@ { "indices.exists_type": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-types-exists.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-types-exists.html", "stability": "stable", "methods": ["HEAD"], "deprecated" : { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush.json index 7c6c9cb32cd7d..a81f343e2b0b2 100644 
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush.json @@ -1,6 +1,6 @@ { "indices.flush": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-flush.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-flush.html", "stability": "stable", "methods": ["POST", "GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json index c75bf0e47ef96..b058115a33385 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json @@ -1,6 +1,6 @@ { "indices.forcemerge": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerge.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerge.html", "stability": "stable", "methods": ["POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json index a0e947504b3a0..4860a2fbd917a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json @@ -1,6 +1,6 @@ { "indices.get":{ - "documentation":"http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-index.html", + "documentation":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-index.html", "stability": "stable", "methods":[ "GET" ], "url":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_alias.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_alias.json index db819f446ad1d..110df41abe0ca 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_alias.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_alias.json @@ -1,6 +1,6 @@ { "indices.get_alias": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_field_mapping.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_field_mapping.json index 83a199cbdc045..d473964e0ef63 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_field_mapping.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_field_mapping.json @@ -1,6 +1,6 @@ { "indices.get_field_mapping": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-field-mapping.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-field-mapping.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_mapping.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_mapping.json index ad55adc18933d..283e2d3d3aa36 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_mapping.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_mapping.json @@ 
-1,6 +1,6 @@ { "indices.get_mapping": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-mapping.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-mapping.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_settings.json index d86f2b0a3b810..9b6ea42504288 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_settings.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_settings.json @@ -1,6 +1,6 @@ { "indices.get_settings": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-settings.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-settings.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_template.json index f81fc337dd4ac..3e0a63a1b6f28 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_template.json @@ -1,6 +1,6 @@ { "indices.get_template": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_upgrade.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_upgrade.json index 4770fba4e9c4a..100f229bb4e32 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_upgrade.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_upgrade.json @@ -1,6 +1,6 @@ { "indices.get_upgrade": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-upgrade.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-upgrade.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.open.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.open.json index 36cc75e264935..c968713535be1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.open.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.open.json @@ -1,6 +1,6 @@ { "indices.open": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-open-close.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-open-close.html", "stability": "stable", "methods": ["POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_alias.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_alias.json index 8cbcfcdd25e53..c6f40fdbb8c4a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_alias.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_alias.json @@ -1,6 +1,6 @@ { "indices.put_alias": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html", + "documentation": 
"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html", "stability": "stable", "methods": ["PUT", "POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json index 064c532917c74..56b7d7f5f5f9b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json @@ -1,6 +1,6 @@ { "indices.put_mapping": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-put-mapping.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-put-mapping.html", "stability": "stable", "methods": ["PUT", "POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json index 3ef98b8b0a54c..755fcb3d8ad1e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json @@ -1,6 +1,6 @@ { "indices.put_settings": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-update-settings.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-update-settings.html", "stability": "stable", "methods": ["PUT"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_template.json index 58235afa281f2..eee0def8fad05 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_template.json @@ -1,6 +1,6 @@ { "indices.put_template": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html", "stability": "stable", "methods": ["PUT", "POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.recovery.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.recovery.json index 6c3b574d23b0d..5df11abc5990b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.recovery.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.recovery.json @@ -1,6 +1,6 @@ { "indices.recovery" : { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-recovery.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-recovery.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.refresh.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.refresh.json index 157aae6c9178e..bb7eefb90e582 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.refresh.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.refresh.json @@ -1,6 +1,6 @@ { "indices.refresh": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-refresh.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-refresh.html", "stability": "stable", "methods": ["POST", 
"GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json index 3d5addef6b5cb..49cfbf93df3dc 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json @@ -1,6 +1,6 @@ { "indices.rollover": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-rollover-index.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-rollover-index.html", "stability": "stable", "methods": ["POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.segments.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.segments.json index a39d492b5a40c..bbbdc4089d4bc 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.segments.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.segments.json @@ -1,6 +1,6 @@ { "indices.segments": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-segments.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-segments.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shard_stores.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shard_stores.json index ff64c40f6674e..484c4a5fbcf10 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shard_stores.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shard_stores.json @@ -1,6 +1,6 @@ { "indices.shard_stores": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shards-stores.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shards-stores.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json index 111444b7ebca8..c4f4ea32fe4ea 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json @@ -1,6 +1,6 @@ { "indices.shrink": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shrink-index.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shrink-index.html", "stability": "stable", "methods": ["PUT", "POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json index 10830b4bb9504..08c3fd3a407b3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json @@ -1,6 +1,6 @@ { "indices.split": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-split-index.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-split-index.html", "stability": "stable", "methods": ["PUT", "POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json 
index 3be4d9387245b..131e7030cc4f8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json @@ -1,6 +1,6 @@ { "indices.stats": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-stats.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-stats.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.update_aliases.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.update_aliases.json index 8c75adfe68bf0..4da9daa357c23 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.update_aliases.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.update_aliases.json @@ -1,6 +1,6 @@ { "indices.update_aliases": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html", "stability": "stable", "methods": ["POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.upgrade.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.upgrade.json index ce408a4e1e509..743d647102693 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.upgrade.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.upgrade.json @@ -1,6 +1,6 @@ { "indices.upgrade": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-upgrade.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-upgrade.html", "stability": "stable", "methods": ["POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.validate_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.validate_query.json index a4cf1d0e387c8..9925a41ad6108 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.validate_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.validate_query.json @@ -1,6 +1,6 @@ { "indices.validate_query": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/search-validate.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/search-validate.html", "stability": "stable", "methods": ["GET", "POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/info.json b/rest-api-spec/src/main/resources/rest-api-spec/api/info.json index c1fec52fc8ca4..a0a422e8c90cf 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/info.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/info.json @@ -1,6 +1,6 @@ { "info": { - "documentation": "http://www.elastic.co/guide/", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/mget.json b/rest-api-spec/src/main/resources/rest-api-spec/api/mget.json index bd8e258c2e206..13785af0d7b69 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/mget.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/mget.json @@ -1,6 +1,6 @@ { "mget": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/docs-multi-get.html", + "documentation": 
"https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-multi-get.html", "stability": "stable", "methods": ["GET", "POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json index 6f3e0c5e9d3bb..f3cb83a99e26a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json @@ -1,6 +1,6 @@ { "msearch": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/search-multi-search.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/search-multi-search.html", "stability": "stable", "methods": ["GET", "POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch_template.json index 412f4a508f433..6fea78ea79398 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch_template.json @@ -1,6 +1,6 @@ { "msearch_template": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html", "stability": "stable", "methods": ["GET", "POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/mtermvectors.json b/rest-api-spec/src/main/resources/rest-api-spec/api/mtermvectors.json index 7b722a5bebfa5..90eb37a160cdc 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/mtermvectors.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/mtermvectors.json @@ -1,6 +1,6 @@ { "mtermvectors" : { - "documentation" : "http://www.elastic.co/guide/en/elasticsearch/reference/master/docs-multi-termvectors.html", + "documentation" : "https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-multi-termvectors.html", "stability": "stable", "methods" : ["GET", "POST"], "url" : { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.hot_threads.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.hot_threads.json index b94fbaaaedf88..6ee180500b284 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.hot_threads.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.hot_threads.json @@ -1,6 +1,6 @@ { "nodes.hot_threads": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-hot-threads.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-hot-threads.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.info.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.info.json index ca6637d81dcb4..404f9107224ac 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.info.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.info.json @@ -1,6 +1,6 @@ { "nodes.info": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-info.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-info.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json 
b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json index 50af7b6d478fc..6debf0db7b047 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json @@ -1,6 +1,6 @@ { "nodes.stats": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-stats.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-stats.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.usage.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.usage.json index 6b70af58c08ea..cc90ca953d9d2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.usage.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.usage.json @@ -1,6 +1,6 @@ { "nodes.usage": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-usage.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-usage.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ping.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ping.json index d49dd78721659..2deec2d533c53 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ping.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ping.json @@ -1,6 +1,6 @@ { "ping": { - "documentation": "http://www.elastic.co/guide/", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html", "stability": "stable", "methods": ["HEAD"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/put_script.json b/rest-api-spec/src/main/resources/rest-api-spec/api/put_script.json index 5de2bf9bd306f..fbe3eb9b3cf6c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/put_script.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/put_script.json @@ -1,6 +1,6 @@ { "put_script": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html", "stability": "stable", "methods": ["PUT", "POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/render_search_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/render_search_template.json index fa3af422fee17..b560fdd18ebcf 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/render_search_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/render_search_template.json @@ -1,6 +1,6 @@ { "render_search_template": { - "documentation": "http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html#_validating_templates", "stability": "stable", "methods": ["GET", "POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json index 30f5c6747f3a8..79f580fbc7425 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json @@ -1,6 +1,6 @@ { "search": { - "documentation": 
"http://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html", "stability": "stable", "methods": ["GET", "POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search_shards.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search_shards.json index 082603209925e..148923901dcb2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search_shards.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search_shards.json @@ -1,6 +1,6 @@ { "search_shards": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/search-shards.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/search-shards.html", "stability": "stable", "methods": ["GET", "POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json index a6777d0628126..0a5c5c3a5d797 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json @@ -1,6 +1,6 @@ { "search_template": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html", "stability": "stable", "methods": ["GET", "POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create.json index 66be1eeff675c..f4aa725c5255b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create.json @@ -1,6 +1,6 @@ { "snapshot.create": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", "stability": "stable", "methods": ["PUT", "POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create_repository.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create_repository.json index 830eb2952886e..f55385f472fc7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create_repository.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create_repository.json @@ -1,6 +1,6 @@ { "snapshot.create_repository": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", "stability": "stable", "methods": ["PUT", "POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete.json index c04dc5410ea10..ce7235c72d852 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete.json @@ -1,6 +1,6 @@ { "snapshot.delete": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "documentation": 
"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", "stability": "stable", "methods": ["DELETE"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete_repository.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete_repository.json index 1912423978173..2978a523990f8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete_repository.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete_repository.json @@ -1,6 +1,6 @@ { "snapshot.delete_repository": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", "stability": "stable", "methods": ["DELETE"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json index b23a3fbc0281d..a55e00f28c5e4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json @@ -1,6 +1,6 @@ { "snapshot.get": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get_repository.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get_repository.json index 9aa13758097c4..bf9462874f92b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get_repository.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get_repository.json @@ -1,6 +1,6 @@ { "snapshot.get_repository": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.restore.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.restore.json index 6107dd1663a37..cc283dcbbd54e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.restore.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.restore.json @@ -1,6 +1,6 @@ { "snapshot.restore": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", "stability": "stable", "methods": ["POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.status.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.status.json index e76ce7893951e..32e09d53ca224 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.status.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.status.json @@ -1,6 +1,6 @@ { "snapshot.status": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", "stability": "stable", "methods": ["GET"], "url": { diff 
--git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.verify_repository.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.verify_repository.json index c875991b21a56..0ca87cd76475d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.verify_repository.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.verify_repository.json @@ -1,6 +1,6 @@ { "snapshot.verify_repository": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", "stability": "stable", "methods": ["POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.cancel.json b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.cancel.json index 4ee287ee6454b..d747de8dbfc9f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.cancel.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.cancel.json @@ -1,6 +1,6 @@ { "tasks.cancel": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html", "stability": "stable", "methods": ["POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.get.json index 63702f7c592c6..1e4426ddcdecb 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.get.json @@ -1,6 +1,6 @@ { "tasks.get": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json index 8bb31723e0be4..acc889b2b7212 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json @@ -1,6 +1,6 @@ { "tasks.list": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html", "stability": "stable", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/termvectors.json b/rest-api-spec/src/main/resources/rest-api-spec/api/termvectors.json index 2bfacaf676c67..066e71b038554 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/termvectors.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/termvectors.json @@ -1,6 +1,6 @@ { "termvectors" : { - "documentation" : "http://www.elastic.co/guide/en/elasticsearch/reference/master/docs-termvectors.html", + "documentation" : "https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-termvectors.html", "stability": "stable", "methods" : ["GET", "POST"], "url" : { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json index 4b850002a3337..e883b3ffc1eb1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json @@ -1,6 +1,6 @@ { 
"update": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update.html", "stability": "stable", "methods": ["POST"], "url": { From b0a84089b4b9cb675a52ff7dce8d08845c1c1f89 Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Wed, 24 Jul 2019 11:27:35 -0600 Subject: [PATCH 05/51] Order ILM actions in policy definition documentation (#44773) We already have a note that the order of actions is up to ILM for each phase, this commit puts the actions in the same order as they will be executed. Resolves #41729 --- docs/reference/ilm/policy-definitions.asciidoc | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/reference/ilm/policy-definitions.asciidoc b/docs/reference/ilm/policy-definitions.asciidoc index 4949c43e6ce65..98cd4fd041328 100644 --- a/docs/reference/ilm/policy-definitions.asciidoc +++ b/docs/reference/ilm/policy-definitions.asciidoc @@ -90,20 +90,20 @@ policy definition. * Hot - <> - - <> - <> + - <> * Warm - <> - - <> + - <> - <> - - <> + - <> - <> - - <> + - <> * Cold - <> + - <> - <> - <> - - <> * Delete - <> From 34675caef44577a96f984614a8c05c21dfb2b825 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Wed, 24 Jul 2019 14:07:10 -0400 Subject: [PATCH 06/51] Geo: deprecate ShapeBuilder in QueryBuilders (#44715) Removes unnecessary now timeline decompositions from shape builders and deprecates ShapeBuilders in QueryBuilder in favor of libs/geo shapes. Relates to #40908 --- .../org/elasticsearch/common/geo/GeoJson.java | 18 +- .../elasticsearch/common/geo/GeometryIO.java | 307 ++++++++++++++++++ .../common/geo/GeometryIndexer.java | 5 +- .../geo/builders/LineStringBuilder.java | 28 +- .../geo/builders/MultiLineStringBuilder.java | 10 - .../common/geo/builders/PolygonBuilder.java | 71 +--- .../index/query/GeoShapeQueryBuilder.java | 185 +++++++++-- .../common/geo/GeoWKTShapeParserTests.java | 2 +- .../common/geo/GeometryIOTests.java | 110 +++++++ .../common/geo/GeometryParserTests.java | 4 +- .../common/geo/ShapeBuilderTests.java | 65 ++-- .../query/GeoShapeQueryBuilderTests.java | 2 +- .../elasticsearch/geo/GeometryTestUtils.java | 32 +- 13 files changed, 655 insertions(+), 184 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/common/geo/GeometryIO.java create mode 100644 server/src/test/java/org/elasticsearch/common/geo/GeometryIOTests.java diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoJson.java b/server/src/main/java/org/elasticsearch/common/geo/GeoJson.java index ec5600836f1e4..032fddce6b77f 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoJson.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoJson.java @@ -382,17 +382,17 @@ public static String getGeoJsonName(Geometry geometry) { return geometry.visit(new GeometryVisitor<>() { @Override public String visit(Circle circle) { - return "Circle"; + return "circle"; } @Override public String visit(GeometryCollection collection) { - return "GeometryCollection"; + return "geometrycollection"; } @Override public String visit(Line line) { - return "LineString"; + return "linestring"; } @Override @@ -402,32 +402,32 @@ public String visit(LinearRing ring) { @Override public String visit(MultiLine multiLine) { - return "MultiLineString"; + return "multilinestring"; } @Override public String visit(MultiPoint multiPoint) { - return "MultiPoint"; + return "multipoint"; } @Override public String visit(MultiPolygon 
multiPolygon) { - return "MultiPolygon"; + return "multipolygon"; } @Override public String visit(Point point) { - return "Point"; + return "point"; } @Override public String visit(Polygon polygon) { - return "Polygon"; + return "polygon"; } @Override public String visit(Rectangle rectangle) { - return "Envelope"; + return "envelope"; } }); } diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeometryIO.java b/server/src/main/java/org/elasticsearch/common/geo/GeometryIO.java new file mode 100644 index 0000000000000..fb8b2327f3951 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/geo/GeometryIO.java @@ -0,0 +1,307 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.geo; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.geo.geometry.Circle; +import org.elasticsearch.geo.geometry.Geometry; +import org.elasticsearch.geo.geometry.GeometryCollection; +import org.elasticsearch.geo.geometry.GeometryVisitor; +import org.elasticsearch.geo.geometry.Line; +import org.elasticsearch.geo.geometry.LinearRing; +import org.elasticsearch.geo.geometry.MultiLine; +import org.elasticsearch.geo.geometry.MultiPoint; +import org.elasticsearch.geo.geometry.MultiPolygon; +import org.elasticsearch.geo.geometry.Point; +import org.elasticsearch.geo.geometry.Polygon; +import org.elasticsearch.geo.geometry.Rectangle; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; + +/** + * Utility class for binary serialization/deserialization of libs/geo classes + */ +public final class GeometryIO { + + public static void writeGeometry(StreamOutput out, Geometry geometry) throws IOException { + out.writeString(GeoJson.getGeoJsonName(geometry).toLowerCase(Locale.ROOT)); + geometry.visit(new GeometryVisitor<Void, IOException>() { + @Override + public Void visit(Circle circle) throws IOException { + throw new UnsupportedOperationException("circle is not supported"); + } + + @Override + public Void visit(GeometryCollection<?> collection) throws IOException { + out.writeVInt(collection.size()); + for (Geometry shape : collection) { + writeGeometry(out, shape); + } + return null; + } + + @Override + public Void visit(Line line) throws IOException { + writeCoordinates(line); + return null; + } + + @Override + public Void visit(LinearRing ring) { + throw new UnsupportedOperationException("linear ring is not supported"); + } + + @Override + public Void visit(MultiLine multiLine) throws IOException { + out.writeVInt(multiLine.size()); + for (Line line : multiLine) { + visit(line); + } + return null; + } + + @Override + public Void visit(MultiPoint multiPoint) throws IOException { + out.writeVInt(multiPoint.size());
+ for (int i = 0; i < multiPoint.size(); i++) { + Point point = multiPoint.get(i); + writeCoordinate(point.getLat(), point.getLon(), point.getAlt()); + } + return null; + } + + @Override + public Void visit(MultiPolygon multiPolygon) throws IOException { + out.writeBoolean(true); // Orientation for BWC with ShapeBuilder + out.writeVInt(multiPolygon.size()); + for (int i = 0; i < multiPolygon.size(); i++) { + visit(multiPolygon.get(i)); + } + return null; + } + + @Override + public Void visit(Point point) throws IOException { + out.writeVInt(1); // Number of points, for BWC with ShapeBuilder + writeCoordinate(point.getLat(), point.getLon(), point.getAlt()); + return null; + } + + @Override + public Void visit(Polygon polygon) throws IOException { + writeCoordinates(polygon.getPolygon()); + out.writeBoolean(true); // Orientation for BWC with ShapeBuilder + out.writeVInt(polygon.getNumberOfHoles()); + for (int i = 0; i < polygon.getNumberOfHoles(); i++) { + writeCoordinates(polygon.getHole(i)); + } + return null; + } + + @Override + public Void visit(Rectangle rectangle) throws IOException { + writeCoordinate(rectangle.getMaxLat(), rectangle.getMinLon(), rectangle.getMinAlt()); // top left + writeCoordinate(rectangle.getMinLat(), rectangle.getMaxLon(), rectangle.getMaxAlt()); // bottom right + return null; + } + + private void writeCoordinate(double lat, double lon, double alt) throws IOException { + out.writeDouble(lon); + out.writeDouble(lat); + out.writeOptionalDouble(Double.isNaN(alt) ? null : alt); + } + + private void writeCoordinates(Line line) throws IOException { + out.writeVInt(line.length()); + for (int i = 0; i < line.length(); i++) { + writeCoordinate(line.getLat(i), line.getLon(i), line.getAlt(i)); + } + } + + }); + } + + public static Geometry readGeometry(StreamInput in) throws IOException { + String type = in.readString(); + switch (type) { + case "geometrycollection": + return readGeometryCollection(in); + case "polygon": + return readPolygon(in); + case "point": + return readPoint(in); + case "linestring": + return readLine(in); + case "multilinestring": + return readMultiLine(in); + case "multipoint": + return readMultiPoint(in); + case "multipolygon": + return readMultiPolygon(in); + case "envelope": + return readRectangle(in); + default: + throw new UnsupportedOperationException("unsupported shape type " + type); + } + } + + private static GeometryCollection<Geometry> readGeometryCollection(StreamInput in) throws IOException { + int size = in.readVInt(); + List<Geometry> shapes = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + shapes.add(readGeometry(in)); + } + return new GeometryCollection<>(shapes); + } + + private static Polygon readPolygon(StreamInput in) throws IOException { + double[][] shellComponents = readLineComponents(in); + boolean orientation = in.readBoolean(); + LinearRing shell = buildLinearRing(shellComponents, orientation); + int numberOfHoles = in.readVInt(); + if (numberOfHoles > 0) { + List<LinearRing> holes = new ArrayList<>(numberOfHoles); + for (int i = 0; i < numberOfHoles; i++) { + holes.add(buildLinearRing(readLineComponents(in), orientation)); + } + return new Polygon(shell, holes); + } else { + return new Polygon(shell); + } + } + + private static double[][] readLineComponents(StreamInput in) throws IOException { + int len = in.readVInt(); + double[] lat = new double[len]; + double[] lon = new double[len]; + double[] alt = new double[len]; + for (int i = 0; i < len; i++) { + lon[i] = in.readDouble(); + lat[i] = in.readDouble(); + alt[i] = readAlt(in); + } + if
(Double.isNaN(alt[0])) { + return new double[][]{lat, lon}; + } else { + return new double[][]{lat, lon, alt}; + } + } + + private static void reverse(double[][] arr) { + for (double[] carr : arr) { + int len = carr.length; + for (int j = 0; j < len / 2; j++) { + double temp = carr[j]; + carr[j] = carr[len - j - 1]; + carr[len - j - 1] = temp; + } + } + } + + private static LinearRing buildLinearRing(double[][] arr, boolean orientation) { + if (orientation == false) { + reverse(arr); + } + if (arr.length == 3) { + return new LinearRing(arr[0], arr[1], arr[2]); + } else { + return new LinearRing(arr[0], arr[1]); + } + } + + private static Point readPoint(StreamInput in) throws IOException { + int size = in.readVInt(); // For BWC with ShapeBuilder + if (size != 1) { + throw new IOException("Unexpected point count " + size); + } + double lon = in.readDouble(); + double lat = in.readDouble(); + double alt = readAlt(in); + return new Point(lat, lon, alt); + } + + private static Line readLine(StreamInput in) throws IOException { + double[][] coords = readLineComponents(in); + if (coords.length == 3) { + return new Line(coords[0], coords[1], coords[2]); + } else { + return new Line(coords[0], coords[1]); + } + } + + private static MultiLine readMultiLine(StreamInput in) throws IOException { + int size = in.readVInt(); + List<Line> lines = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + lines.add(readLine(in)); + } + return new MultiLine(lines); + } + + private static MultiPoint readMultiPoint(StreamInput in) throws IOException { + int size = in.readVInt(); + List<Point> points = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + double lon = in.readDouble(); + double lat = in.readDouble(); + double alt = readAlt(in); + points.add(new Point(lat, lon, alt)); + } + return new MultiPoint(points); + } + + + private static MultiPolygon readMultiPolygon(StreamInput in) throws IOException { + in.readBoolean(); // orientation for BWC + int size = in.readVInt(); + List<Polygon> polygons = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + polygons.add(readPolygon(in)); + } + return new MultiPolygon(polygons); + } + + private static Rectangle readRectangle(StreamInput in) throws IOException { + // top left + double minLon = in.readDouble(); + double maxLat = in.readDouble(); + double minAlt = readAlt(in); + + // bottom right + double maxLon = in.readDouble(); + double minLat = in.readDouble(); + double maxAlt = readAlt(in); + + return new Rectangle(minLat, maxLat, minLon, maxLon, minAlt, maxAlt); + } + + private static double readAlt(StreamInput in) throws IOException { + Double alt = in.readOptionalDouble(); + if (alt == null) { + return Double.NaN; + } else { + return alt; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeometryIndexer.java b/server/src/main/java/org/elasticsearch/common/geo/GeometryIndexer.java index 48f84b1211b92..6d6270e49bbfb 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeometryIndexer.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeometryIndexer.java @@ -245,6 +245,7 @@ private List<Line> decompose(double dateline, double[] lons, double[] lats) { for (int i = 1; i < lons.length; i++) { double t = intersection(lastLon, lons[i], dateline); + lastLon = lons[i]; if (Double.isNaN(t) == false) { double[] partLons = Arrays.copyOfRange(lons, offset, i + 1); double[] partLats = Arrays.copyOfRange(lats, offset, i + 1); @@ -330,7 +331,7 @@ private void validateHole(LinearRing shell, LinearRing hole) { exterior.add(new
Point(shell.getLat(i), shell.getLon(i))); } for (int i = 0; i < hole.length(); i++) { - interior.remove(new Point(hole.getLat(i), hole.getLon(i))); + interior.add(new Point(hole.getLat(i), hole.getLon(i))); } exterior.retainAll(interior); if (exterior.size() >= 2) { @@ -645,7 +646,7 @@ private static Edge[] concat(int component, boolean direction, Point[] points, f edges[edgeOffset + i - 1].next = edges[edgeOffset + i] = new Edge(nextPoint, null); edges[edgeOffset + i - 1].component = component; } else { - throw new InvalidShapeException("Provided shape has duplicate consecutive coordinates at: " + nextPoint); + throw new InvalidShapeException("Provided shape has duplicate consecutive coordinates at: (" + nextPoint + ")"); } } diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java index 8e1e9d7a993b2..3b8e0522d4f4e 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.geo.geometry.Line; -import org.elasticsearch.geo.geometry.MultiLine; import org.locationtech.jts.geom.Coordinate; import org.locationtech.jts.geom.Geometry; import org.locationtech.jts.geom.GeometryFactory; @@ -36,9 +35,6 @@ import java.util.Arrays; import java.util.List; -import static org.elasticsearch.common.geo.GeoUtils.normalizeLat; -import static org.elasticsearch.common.geo.GeoUtils.normalizeLon; - public class LineStringBuilder extends ShapeBuilder<JtsGeometry, org.elasticsearch.geo.geometry.Geometry, LineStringBuilder> { public static final GeoShapeType TYPE = GeoShapeType.LINESTRING; @@ -126,18 +122,8 @@ public JtsGeometry buildS4J() { @Override public org.elasticsearch.geo.geometry.Geometry buildGeometry() { - // decompose linestrings crossing dateline into array of Lines - Coordinate[] coordinates = this.coordinates.toArray(new Coordinate[this.coordinates.size()]); - if (wrapdateline) { - List<Line> linestrings = decomposeGeometry(coordinates, new ArrayList<>()); - if (linestrings.size() == 1) { - return linestrings.get(0); - } else { - return new MultiLine(linestrings); - } - } - return new Line(Arrays.stream(coordinates).mapToDouble(i->normalizeLat(i.y)).toArray(), - Arrays.stream(coordinates).mapToDouble(i->normalizeLon(i.x)).toArray()); + return new Line(coordinates.stream().mapToDouble(i->i.y).toArray(), + coordinates.stream().mapToDouble(i->i.x).toArray()); } static ArrayList<LineString> decomposeS4J(GeometryFactory factory, Coordinate[] coordinates, ArrayList<LineString> strings) { @@ -149,16 +135,6 @@ static ArrayList<LineString> decomposeS4J(GeometryFactory factory, Coordinate[] return strings; } - static List<Line> decomposeGeometry(Coordinate[] coordinates, List<Line> lines) { - for (Coordinate[] part : decompose(+DATELINE, coordinates)) { - for (Coordinate[] line : decompose(-DATELINE, part)) { - lines.add(new Line(Arrays.stream(line).mapToDouble(i->normalizeLat(i.y)).toArray(), - Arrays.stream(line).mapToDouble(i->normalizeLon(i.x)).toArray())); - } - } - return lines; - } - /** * Decompose a linestring given as array of coordinates at a vertical line.
* diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java index 24a8b3b226f36..558f74b1c997e 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java @@ -154,16 +154,6 @@ public org.elasticsearch.geo.geometry.Geometry buildGeometry() { if (lines.isEmpty()) { return MultiLine.EMPTY; } - if (wrapdateline) { - List parts = new ArrayList<>(); - for (LineStringBuilder line : lines) { - LineStringBuilder.decomposeGeometry(line.coordinates(false), parts); - } - if (parts.size() == 1) { - return parts.get(0); - } - return new MultiLine(parts); - } List linestrings = new ArrayList<>(lines.size()); for (int i = 0; i < lines.size(); ++i) { LineStringBuilder lsb = lines.get(i); diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java index 97503efc033b0..bff882254afc4 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java @@ -19,12 +19,6 @@ package org.elasticsearch.common.geo.builders; -import org.locationtech.jts.geom.Coordinate; -import org.locationtech.jts.geom.Geometry; -import org.locationtech.jts.geom.GeometryFactory; -import org.locationtech.jts.geom.LinearRing; -import org.locationtech.jts.geom.MultiPolygon; -import org.locationtech.jts.geom.Polygon; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.parsers.ShapeParser; @@ -32,13 +26,18 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.jts.geom.GeometryFactory; +import org.locationtech.jts.geom.LinearRing; +import org.locationtech.jts.geom.MultiPolygon; +import org.locationtech.jts.geom.Polygon; import org.locationtech.spatial4j.exception.InvalidShapeException; import org.locationtech.spatial4j.shape.jts.JtsGeometry; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -47,8 +46,6 @@ import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; -import static org.elasticsearch.common.geo.GeoUtils.normalizeLat; -import static org.elasticsearch.common.geo.GeoUtils.normalizeLon; import static org.apache.lucene.geo.GeoUtils.orient; /** @@ -235,12 +232,6 @@ public JtsGeometry buildS4J() { @Override public org.elasticsearch.geo.geometry.Geometry buildGeometry() { - if (wrapdateline) { - Coordinate[][][] polygons = coordinates(); - return polygons.length == 1 - ? 
polygonGeometry(polygons[0]) - : multipolygon(polygons); - } return toPolygonGeometry(); } @@ -294,15 +285,12 @@ public org.elasticsearch.geo.geometry.Polygon toPolygonGeometry() { for (int i = 0; i < this.holes.size(); ++i) { holes.add(linearRing(this.holes.get(i).coordinates)); } - return new org.elasticsearch.geo.geometry.Polygon( - new org.elasticsearch.geo.geometry.LinearRing( - this.shell.coordinates.stream().mapToDouble(i -> normalizeLat(i.y)).toArray(), - this.shell.coordinates.stream().mapToDouble(i -> normalizeLon(i.x)).toArray()), holes); + return new org.elasticsearch.geo.geometry.Polygon(linearRing(this.shell.coordinates), holes); } protected static org.elasticsearch.geo.geometry.LinearRing linearRing(List coordinates) { - return new org.elasticsearch.geo.geometry.LinearRing(coordinates.stream().mapToDouble(i -> normalizeLat(i.y)).toArray(), - coordinates.stream().mapToDouble(i -> normalizeLon(i.x)).toArray()); + return new org.elasticsearch.geo.geometry.LinearRing(coordinates.stream().mapToDouble(i -> i.y).toArray(), + coordinates.stream().mapToDouble(i -> i.x).toArray()); } protected static LinearRing linearRingS4J(GeometryFactory factory, List coordinates) { @@ -338,39 +326,6 @@ protected static Polygon polygonS4J(GeometryFactory factory, Coordinate[][] poly return factory.createPolygon(shell, holes); } - protected static org.elasticsearch.geo.geometry.Polygon polygonGeometry(Coordinate[][] polygon) { - List holes; - Coordinate[] shell = polygon[0]; - if (polygon.length > 1) { - holes = new ArrayList<>(polygon.length - 1); - for (int i = 1; i < polygon.length; ++i) { - Coordinate[] coords = polygon[i]; - //We do not have holes on the dateline as they get eliminated - //when breaking the polygon around it. - double[] x = new double[coords.length]; - double[] y = new double[coords.length]; - for (int c = 0; c < coords.length; ++c) { - x[c] = normalizeLon(coords[c].x); - y[c] = normalizeLat(coords[c].y); - } - holes.add(new org.elasticsearch.geo.geometry.LinearRing(y, x)); - } - } else { - holes = Collections.emptyList(); - } - - double[] x = new double[shell.length]; - double[] y = new double[shell.length]; - for (int i = 0; i < shell.length; ++i) { - //Lucene Tessellator treats different +180 and -180 and we should keep the sign. - //normalizeLon method excludes -180. - x[i] = Math.abs(shell[i].x) > 180 ? normalizeLon(shell[i].x) : shell[i].x; - y[i] = normalizeLat(shell[i].y); - } - - return new org.elasticsearch.geo.geometry.Polygon(new org.elasticsearch.geo.geometry.LinearRing(y, x), holes); - } - /** * Create a Multipolygon from a set of coordinates. Each primary array contains a polygon which * in turn contains an array of linestrings. 
These line Strings are represented as an array of
@@ -389,14 +344,6 @@ protected static MultiPolygon multipolygonS4J(GeometryFactory factory, Coordinat
         return factory.createMultiPolygon(polygonSet);
     }
 
-    protected static org.elasticsearch.geo.geometry.MultiPolygon multipolygon(Coordinate[][][] polygons) {
-        List<org.elasticsearch.geo.geometry.Polygon> polygonSet = new ArrayList<>(polygons.length);
-        for (int i = 0; i < polygons.length; ++i) {
-            polygonSet.add(polygonGeometry(polygons[i]));
-        }
-        return new org.elasticsearch.geo.geometry.MultiPolygon(polygonSet);
-    }
-
     /**
      * This method sets the component id of all edges in a ring to a given id and shifts the
      * coordinates of this component according to the dateline
diff --git a/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java
index b651a26d7e280..654aaf9751ca0 100644
--- a/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java
@@ -40,9 +40,21 @@
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.ParsingException;
+import org.elasticsearch.common.geo.GeoJson;
 import org.elasticsearch.common.geo.GeoShapeType;
+import org.elasticsearch.common.geo.GeometryIO;
+import org.elasticsearch.common.geo.GeometryIndexer;
+import org.elasticsearch.common.geo.GeometryParser;
 import org.elasticsearch.common.geo.ShapeRelation;
 import org.elasticsearch.common.geo.SpatialStrategy;
+import org.elasticsearch.common.geo.builders.EnvelopeBuilder;
+import org.elasticsearch.common.geo.builders.GeometryCollectionBuilder;
+import org.elasticsearch.common.geo.builders.LineStringBuilder;
+import org.elasticsearch.common.geo.builders.MultiLineStringBuilder;
+import org.elasticsearch.common.geo.builders.MultiPointBuilder;
+import org.elasticsearch.common.geo.builders.MultiPolygonBuilder;
+import org.elasticsearch.common.geo.builders.PointBuilder;
+import org.elasticsearch.common.geo.builders.PolygonBuilder;
 import org.elasticsearch.common.geo.builders.ShapeBuilder;
 import org.elasticsearch.common.geo.parsers.ShapeParser;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -62,11 +74,16 @@
 import org.elasticsearch.geo.geometry.MultiPoint;
 import org.elasticsearch.geo.geometry.MultiPolygon;
 import org.elasticsearch.geo.geometry.Point;
+import org.elasticsearch.geo.geometry.Rectangle;
 import org.elasticsearch.index.mapper.BaseGeoShapeFieldMapper;
 import org.elasticsearch.index.mapper.LegacyGeoShapeFieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
+import org.locationtech.jts.geom.Coordinate;
+import org.locationtech.spatial4j.shape.Shape;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Objects;
 import java.util.function.Supplier;
@@ -104,8 +121,8 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuilder> {
-    private final ShapeBuilder shape;
-    private final Supplier<ShapeBuilder> supplier;
+    private final Geometry shape;
+    private final Supplier<Geometry> supplier;
 
     private SpatialStrategy strategy;
@@ -129,11 +146,28 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuilder>
-    private GeoShapeQueryBuilder(String fieldName, Supplier<ShapeBuilder> supplier, String indexedShapeId,
+    private GeoShapeQueryBuilder(String fieldName, Supplier<Geometry> supplier, String indexedShapeId,
                                  @Nullable String indexedShapeType) {
         this.fieldName = fieldName;
         this.shape = null;
@@ -195,7 +229,7 @@ public GeoShapeQueryBuilder(StreamInput in) throws IOException {
         super(in);
         fieldName = in.readString();
         if (in.readBoolean()) {
-            shape = in.readNamedWriteable(ShapeBuilder.class);
+            shape = GeometryIO.readGeometry(in);
             indexedShapeId = null;
             indexedShapeType = null;
         } else {
@@ -221,7 +255,7 @@ protected void doWriteTo(StreamOutput out) throws IOException {
         boolean hasShape = shape != null;
         out.writeBoolean(hasShape);
         if (hasShape) {
-            out.writeNamedWriteable(shape);
+            GeometryIO.writeGeometry(out, shape);
         } else {
             out.writeOptionalString(indexedShapeId);
             out.writeOptionalString(indexedShapeType);
@@ -244,7 +278,7 @@ public String fieldName() {
     /**
      * @return the shape used in the Query
      */
-    public ShapeBuilder shape() {
+    public Geometry shape() {
         return shape;
     }
@@ -397,7 +431,6 @@ protected Query doToQuery(QueryShardContext context) {
         if (shape == null || supplier != null) {
             throw new UnsupportedOperationException("query must be rewritten first");
         }
-        final ShapeBuilder shapeToQuery = shape;
         final MappedFieldType fieldType = context.fieldMapper(fieldName);
         if (fieldType == null) {
             if (ignoreUnmapped) {
@@ -425,32 +458,36 @@ protected Query doToQuery(QueryShardContext context) {
                 // in this case, execute disjoint as exists && !intersects
                 BooleanQuery.Builder bool = new BooleanQuery.Builder();
                 Query exists = ExistsQueryBuilder.newFilter(context, fieldName);
-                Query intersects = prefixTreeStrategy.makeQuery(getArgs(shapeToQuery, ShapeRelation.INTERSECTS));
+                Query intersects = prefixTreeStrategy.makeQuery(getArgs(shape, ShapeRelation.INTERSECTS));
                 bool.add(exists, BooleanClause.Occur.MUST);
                 bool.add(intersects, BooleanClause.Occur.MUST_NOT);
                 query = new ConstantScoreQuery(bool.build());
             } else {
-                query = new ConstantScoreQuery(prefixTreeStrategy.makeQuery(getArgs(shapeToQuery, relation)));
+                query = new ConstantScoreQuery(prefixTreeStrategy.makeQuery(getArgs(shape, relation)));
             }
         } else {
-            query = new ConstantScoreQuery(getVectorQuery(context, shapeToQuery));
+            query = new ConstantScoreQuery(getVectorQuery(context, shape));
         }
         return query;
     }
 
-    private Query getVectorQuery(QueryShardContext context, ShapeBuilder queryShapeBuilder) {
+    private Query getVectorQuery(QueryShardContext context, Geometry queryShape) {
         // CONTAINS queries are not yet supported by VECTOR strategy
         if (relation == ShapeRelation.CONTAINS) {
             throw new QueryShardException(context,
                 ShapeRelation.CONTAINS + " query relation not supported for Field [" + fieldName + "]");
         }
-        // wrap geoQuery as a ConstantScoreQuery
-        return getVectorQueryFromShape(context, queryShapeBuilder.buildGeometry());
-    }
+        // TODO: Move this to QueryShardContext
+        GeometryIndexer geometryIndexer = new GeometryIndexer(true);
+
+        Geometry processedShape = geometryIndexer.prepareForIndexing(queryShape);
 
-    private Query getVectorQueryFromShape(QueryShardContext context, Geometry queryShape) {
-        return queryShape.visit(new GeometryVisitor<Query, RuntimeException>() {
+        if (processedShape == null) {
+            return new MatchNoDocsQuery();
+        }
+
+        return processedShape.visit(new GeometryVisitor<Query, RuntimeException>() {
             @Override
             public Query visit(Circle circle) {
                 throw new QueryShardException(context, "Field [" + fieldName + "] found an unknown shape Circle");
@@ -536,7 +573,7 @@ public Query visit(org.elasticsearch.geo.geometry.Rectangle r) {
      * Name or path of the field in the Shape Document where the
      * Shape itself is located
      */
-    private void fetch(Client client, GetRequest getRequest, String path, ActionListener<ShapeBuilder> listener) {
+    private void fetch(Client client, GetRequest getRequest, String path, ActionListener<Geometry> listener) {
         getRequest.preference("_local");
         client.get(getRequest, new ActionListener<GetResponse>(){
@@ -565,7 +602,7 @@ public void 
onResponse(GetResponse response) { if (pathElements[currentPathSlot].equals(parser.currentName())) { parser.nextToken(); if (++currentPathSlot == pathElements.length) { - listener.onResponse(ShapeParser.parse(parser)); + listener.onResponse(new GeometryParser(true, true, true).parse(parser)); return; } } else { @@ -589,16 +626,16 @@ public void onFailure(Exception e) { } - public static SpatialArgs getArgs(ShapeBuilder shape, ShapeRelation relation) { + public static SpatialArgs getArgs(Geometry shape, ShapeRelation relation) { switch (relation) { case DISJOINT: - return new SpatialArgs(SpatialOperation.IsDisjointTo, shape.buildS4J()); + return new SpatialArgs(SpatialOperation.IsDisjointTo, buildS4J(shape)); case INTERSECTS: - return new SpatialArgs(SpatialOperation.Intersects, shape.buildS4J()); + return new SpatialArgs(SpatialOperation.Intersects, buildS4J(shape)); case WITHIN: - return new SpatialArgs(SpatialOperation.IsWithin, shape.buildS4J()); + return new SpatialArgs(SpatialOperation.IsWithin, buildS4J(shape)); case CONTAINS: - return new SpatialArgs(SpatialOperation.Contains, shape.buildS4J()); + return new SpatialArgs(SpatialOperation.Contains, buildS4J(shape)); default: throw new IllegalArgumentException("invalid relation [" + relation + "]"); } @@ -616,7 +653,7 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep if (shape != null) { builder.field(SHAPE_FIELD.getPreferredName()); - shape.toXContent(builder, params); + GeoJson.toXContent(shape, builder,params); } else { builder.startObject(INDEXED_SHAPE_FIELD.getPreferredName()) .field(SHAPE_ID_FIELD.getPreferredName(), indexedShapeId); @@ -797,7 +834,7 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws return supplier.get() == null ? 
this : new GeoShapeQueryBuilder(this.fieldName, supplier.get()).relation(relation).strategy(strategy);
         } else if (this.shape == null) {
-            SetOnce<ShapeBuilder> supplier = new SetOnce<>();
+            SetOnce<Geometry> supplier = new SetOnce<>();
             queryRewriteContext.registerAsyncAction((client, listener) -> {
                 GetRequest getRequest;
                 if (indexedShapeType == null) {
@@ -816,4 +853,96 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws
         }
         return this;
     }
+
+    /**
+     * Builds JTS shape from a geometry
+     *
+     * This method is needed to handle legacy indices and will be removed when we no longer need to build JTS shapes
+     */
+    private static Shape buildS4J(Geometry geometry) {
+        return geometryToShapeBuilder(geometry).buildS4J();
+    }
+
+    public static ShapeBuilder geometryToShapeBuilder(Geometry geometry) {
+        ShapeBuilder shapeBuilder = geometry.visit(new GeometryVisitor<>() {
+            @Override
+            public ShapeBuilder visit(Circle circle) {
+                throw new UnsupportedOperationException("circle is not supported");
+            }
+
+            @Override
+            public ShapeBuilder visit(GeometryCollection<?> collection) {
+                GeometryCollectionBuilder shapes = new GeometryCollectionBuilder();
+                for (Geometry geometry : collection) {
+                    shapes.shape(geometry.visit(this));
+                }
+                return shapes;
+            }
+
+            @Override
+            public ShapeBuilder visit(org.elasticsearch.geo.geometry.Line line) {
+                List<Coordinate> coordinates = new ArrayList<>();
+                for (int i = 0; i < line.length(); i++) {
+                    coordinates.add(new Coordinate(line.getLon(i), line.getLat(i), line.getAlt(i)));
+                }
+                return new LineStringBuilder(coordinates);
+            }
+
+            @Override
+            public ShapeBuilder visit(LinearRing ring) {
+                throw new UnsupportedOperationException("linearRing is not supported");
+            }
+
+            @Override
+            public ShapeBuilder visit(MultiLine multiLine) {
+                MultiLineStringBuilder lines = new MultiLineStringBuilder();
+                for (int i = 0; i < multiLine.size(); i++) {
+                    lines.linestring((LineStringBuilder) visit(multiLine.get(i)));
+                }
+                return lines;
+            }
+
+            @Override
+            public ShapeBuilder visit(MultiPoint multiPoint) {
+                List<Coordinate> coordinates = new ArrayList<>();
+                for (int i = 0; i < multiPoint.size(); i++) {
+                    Point p = multiPoint.get(i);
+                    coordinates.add(new Coordinate(p.getLon(), p.getLat(), p.getAlt()));
+                }
+                return new MultiPointBuilder(coordinates);
+            }
+
+            @Override
+            public ShapeBuilder visit(MultiPolygon multiPolygon) {
+                MultiPolygonBuilder polygons = new MultiPolygonBuilder();
+                for (int i = 0; i < multiPolygon.size(); i++) {
+                    polygons.polygon((PolygonBuilder) visit(multiPolygon.get(i)));
+                }
+                return polygons;
+            }
+
+            @Override
+            public ShapeBuilder visit(Point point) {
+                return new PointBuilder(point.getLon(), point.getLat());
+            }
+
+            @Override
+            public ShapeBuilder visit(org.elasticsearch.geo.geometry.Polygon polygon) {
+                PolygonBuilder polygonBuilder =
+                    new PolygonBuilder((LineStringBuilder) visit((org.elasticsearch.geo.geometry.Line) polygon.getPolygon()),
+                        ShapeBuilder.Orientation.RIGHT, false);
+                for (int i = 0; i < polygon.getNumberOfHoles(); i++) {
+                    polygonBuilder.hole((LineStringBuilder) visit((org.elasticsearch.geo.geometry.Line) polygon.getHole(i)));
+                }
+                return polygonBuilder;
+            }
+
+            @Override
+            public ShapeBuilder visit(Rectangle rectangle) {
+                return new EnvelopeBuilder(new Coordinate(rectangle.getMinLon(), rectangle.getMaxLat()),
+                    new Coordinate(rectangle.getMaxLon(), rectangle.getMinLat()));
+            }
+        });
+        return shapeBuilder;
+    }
 }
diff --git 
b/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java index 44016145750ed..d8559b3b1260e 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java @@ -470,7 +470,7 @@ public void testParseGeometryCollection() throws IOException, ParseException { } else { GeometryCollectionBuilder gcb = RandomShapeGenerator.createGeometryCollection(random()); assertExpected(gcb.buildS4J(), gcb, true); - assertExpected(gcb.buildGeometry(), gcb, false); + assertExpected(new GeometryIndexer(true).prepareForIndexing(gcb.buildGeometry()), gcb, false); } } diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeometryIOTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeometryIOTests.java new file mode 100644 index 0000000000000..14fc710e2683c --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/geo/GeometryIOTests.java @@ -0,0 +1,110 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.common.geo;
+
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.geo.geometry.Geometry;
+import org.elasticsearch.geo.geometry.GeometryCollection;
+import org.elasticsearch.geo.geometry.ShapeType;
+import org.elasticsearch.test.ESTestCase;
+
+import static org.elasticsearch.geo.GeometryTestUtils.randomGeometry;
+import static org.elasticsearch.index.query.GeoShapeQueryBuilder.geometryToShapeBuilder;
+
+public class GeometryIOTests extends ESTestCase {
+
+    public void testRandomSerialization() throws Exception {
+        for (int i = 0; i < randomIntBetween(1, 20); i++) {
+            boolean hasAlt = randomBoolean();
+            Geometry geometry = randomGeometry(hasAlt);
+            if (shapeSupported(geometry) && randomBoolean()) {
+                // Shape builder conversion doesn't support altitude
+                ShapeBuilder shapeBuilder = geometryToShapeBuilder(geometry);
+                if (randomBoolean()) {
+                    Geometry actual = shapeBuilder.buildGeometry();
+                    assertEquals(geometry, actual);
+                }
+                if (randomBoolean()) {
+                    // Test ShapeBuilder -> Geometry Serialization
+                    try (BytesStreamOutput out = new BytesStreamOutput()) {
+                        out.writeNamedWriteable(shapeBuilder);
+                        try (StreamInput in = out.bytes().streamInput()) {
+                            Geometry actual = GeometryIO.readGeometry(in);
+                            assertEquals(geometry, actual);
+                            assertEquals(0, in.available());
+                        }
+                    }
+                } else {
+                    // Test Geometry -> ShapeBuilder Serialization
+                    try (BytesStreamOutput out = new BytesStreamOutput()) {
+                        GeometryIO.writeGeometry(out, geometry);
+                        try (StreamInput in = out.bytes().streamInput()) {
+                            try (StreamInput nin = new NamedWriteableAwareStreamInput(in, this.writableRegistry())) {
+                                ShapeBuilder actual = nin.readNamedWriteable(ShapeBuilder.class);
+                                assertEquals(shapeBuilder, actual);
+                                assertEquals(0, in.available());
+                            }
+                        }
+                    }
+                }
+                // Test Geometry -> Geometry
+                try (BytesStreamOutput out = new BytesStreamOutput()) {
+                    GeometryIO.writeGeometry(out, geometry);
+                    try (StreamInput in = out.bytes().streamInput()) {
+                        Geometry actual = GeometryIO.readGeometry(in);
+                        assertEquals(geometry, actual);
+                        assertEquals(0, in.available());
+                    }
+                }
+
+            }
+        }
+    }
+
+    private boolean shapeSupported(Geometry geometry) {
+        if (geometry.hasAlt()) {
+            return false;
+        }
+
+        if (geometry.type() == ShapeType.CIRCLE) {
+            return false;
+        }
+
+        if (geometry.type() == ShapeType.GEOMETRYCOLLECTION) {
+            GeometryCollection<?> collection = (GeometryCollection<?>) geometry;
+            for (Geometry g : collection) {
+                if (shapeSupported(g) == false) {
+                    return false;
+                }
+            }
+        }
+        return true;
+    }
+
+    @Override
+    protected NamedWriteableRegistry writableRegistry() {
+        return new NamedWriteableRegistry(GeoShapeType.getShapeWriteables());
+    }
+}
diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeometryParserTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeometryParserTests.java
index 4cef86b1d570e..68492317f4791 100644
--- a/server/src/test/java/org/elasticsearch/common/geo/GeometryParserTests.java
+++ b/server/src/test/java/org/elasticsearch/common/geo/GeometryParserTests.java
@@ -51,7 +51,7 @@ public void testGeoJsonParsing() throws Exception {
             assertEquals(new Point(0, 100), format.fromXContent(parser));
             XContentBuilder newGeoJson = XContentFactory.jsonBuilder();
             format.toXContent(new 
Point(10, 100), newGeoJson, ToXContent.EMPTY_PARAMS); - assertEquals("{\"type\":\"Point\",\"coordinates\":[100.0,10.0]}", Strings.toString(newGeoJson)); + assertEquals("{\"type\":\"point\",\"coordinates\":[100.0,10.0]}", Strings.toString(newGeoJson)); } XContentBuilder pointGeoJsonWithZ = XContentFactory.jsonBuilder() @@ -148,7 +148,7 @@ public void testNullParsing() throws Exception { // if we serialize non-null value - it should be serialized as geojson format.toXContent(new Point(10, 100), newGeoJson, ToXContent.EMPTY_PARAMS); newGeoJson.endObject(); - assertEquals("{\"val\":{\"type\":\"Point\",\"coordinates\":[100.0,10.0]}}", Strings.toString(newGeoJson)); + assertEquals("{\"val\":{\"type\":\"point\",\"coordinates\":[100.0,10.0]}}", Strings.toString(newGeoJson)); newGeoJson = XContentFactory.jsonBuilder().startObject().field("val"); format.toXContent(null, newGeoJson, ToXContent.EMPTY_PARAMS); diff --git a/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java index 3c653db2d1537..bd6c4a2da557f 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java @@ -19,12 +19,8 @@ package org.elasticsearch.common.geo; -import org.locationtech.jts.geom.Coordinate; -import org.locationtech.jts.geom.LineString; -import org.locationtech.jts.geom.Polygon; - -import org.elasticsearch.common.geo.builders.CoordinatesBuilder; import org.elasticsearch.common.geo.builders.CircleBuilder; +import org.elasticsearch.common.geo.builders.CoordinatesBuilder; import org.elasticsearch.common.geo.builders.EnvelopeBuilder; import org.elasticsearch.common.geo.builders.LineStringBuilder; import org.elasticsearch.common.geo.builders.MultiLineStringBuilder; @@ -32,6 +28,9 @@ import org.elasticsearch.common.geo.builders.PolygonBuilder; import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.test.ESTestCase; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.LineString; +import org.locationtech.jts.geom.Polygon; import org.locationtech.spatial4j.exception.InvalidShapeException; import org.locationtech.spatial4j.shape.Circle; import org.locationtech.spatial4j.shape.Point; @@ -161,7 +160,7 @@ public void testLineStringBuilder() { .coordinate(-110.0, 55.0)); lsb.buildS4J(); - lsb.buildGeometry(); + buildGeometry(lsb); // Building a linestring that needs to be wrapped lsb = new LineStringBuilder(new CoordinatesBuilder() @@ -175,7 +174,7 @@ public void testLineStringBuilder() { .coordinate(130.0, 60.0)); lsb.buildS4J(); - lsb.buildGeometry(); + buildGeometry(lsb); // Building a lineString on the dateline lsb = new LineStringBuilder(new CoordinatesBuilder() @@ -185,7 +184,7 @@ public void testLineStringBuilder() { .coordinate(-180.0, -80.0)); lsb.buildS4J(); - lsb.buildGeometry(); + buildGeometry(lsb); // Building a lineString on the dateline lsb = new LineStringBuilder(new CoordinatesBuilder() @@ -195,7 +194,7 @@ public void testLineStringBuilder() { .coordinate(180.0, -80.0)); lsb.buildS4J(); - lsb.buildGeometry(); + buildGeometry(lsb); } public void testMultiLineString() { @@ -215,7 +214,7 @@ public void testMultiLineString() { ) ); mlsb.buildS4J(); - mlsb.buildGeometry(); + buildGeometry(mlsb); // LineString that needs to be wrapped new MultiLineStringBuilder() @@ -235,7 +234,7 @@ public void testMultiLineString() { ); mlsb.buildS4J(); - mlsb.buildGeometry(); + 
buildGeometry(mlsb); } public void testPolygonSelfIntersection() { @@ -283,7 +282,7 @@ public void testPolygonWrapping() { .close()); assertMultiPolygon(pb.buildS4J(), true); - assertMultiPolygon(pb.buildGeometry(), false); + assertMultiPolygon(buildGeometry(pb), false); } public void testLineStringWrapping() { @@ -295,7 +294,7 @@ public void testLineStringWrapping() { .close()); assertMultiLineString(lsb.buildS4J(), true); - assertMultiLineString(lsb.buildGeometry(), false); + assertMultiLineString(buildGeometry(lsb), false); } public void testDatelineOGC() { @@ -339,7 +338,7 @@ public void testDatelineOGC() { )); assertMultiPolygon(builder.close().buildS4J(), true); - assertMultiPolygon(builder.close().buildGeometry(), false); + assertMultiPolygon(buildGeometry(builder.close()), false); } public void testDateline() { @@ -383,7 +382,7 @@ public void testDateline() { )); assertMultiPolygon(builder.close().buildS4J(), true); - assertMultiPolygon(builder.close().buildGeometry(), false); + assertMultiPolygon(buildGeometry(builder.close()), false); } public void testComplexShapeWithHole() { @@ -458,7 +457,7 @@ public void testComplexShapeWithHole() { ) ); assertPolygon(builder.close().buildS4J(), true); - assertPolygon(builder.close().buildGeometry(), false); + assertPolygon(buildGeometry(builder.close()), false); } public void testShapeWithHoleAtEdgeEndPoints() { @@ -480,7 +479,7 @@ public void testShapeWithHoleAtEdgeEndPoints() { .coordinate(4, 1) )); assertPolygon(builder.close().buildS4J(), true); - assertPolygon(builder.close().buildGeometry(), false); + assertPolygon(buildGeometry(builder.close()), false); } public void testShapeWithPointOnDateline() { @@ -491,7 +490,7 @@ public void testShapeWithPointOnDateline() { .coordinate(180, 0) ); assertPolygon(builder.close().buildS4J(), true); - assertPolygon(builder.close().buildGeometry(), false); + assertPolygon(buildGeometry(builder.close()), false); } public void testShapeWithEdgeAlongDateline() { @@ -504,7 +503,7 @@ public void testShapeWithEdgeAlongDateline() { ); assertPolygon(builder.close().buildS4J(), true); - assertPolygon(builder.close().buildGeometry(), false); + assertPolygon(buildGeometry(builder.close()), false); // test case 2: test the negative side of the dateline builder = new PolygonBuilder(new CoordinatesBuilder() @@ -515,7 +514,7 @@ public void testShapeWithEdgeAlongDateline() { ); assertPolygon(builder.close().buildS4J(), true); - assertPolygon(builder.close().buildGeometry(), false); + assertPolygon(buildGeometry(builder.close()), false); } public void testShapeWithBoundaryHoles() { @@ -537,7 +536,7 @@ public void testShapeWithBoundaryHoles() { )); assertMultiPolygon(builder.close().buildS4J(), true); - assertMultiPolygon(builder.close().buildGeometry(), false); + assertMultiPolygon(buildGeometry(builder.close()), false); // test case 2: test the negative side of the dateline builder = new PolygonBuilder( @@ -560,7 +559,7 @@ public void testShapeWithBoundaryHoles() { )); assertMultiPolygon(builder.close().buildS4J(), true); - assertMultiPolygon(builder.close().buildGeometry(), false); + assertMultiPolygon(buildGeometry(builder.close()), false); } public void testShapeWithTangentialHole() { @@ -582,7 +581,7 @@ public void testShapeWithTangentialHole() { )); assertMultiPolygon(builder.close().buildS4J(), true); - assertMultiPolygon(builder.close().buildGeometry(), false); + assertMultiPolygon(buildGeometry(builder.close()), false); } public void testShapeWithInvalidTangentialHole() { @@ -606,7 +605,7 @@ public void 
testShapeWithInvalidTangentialHole() { e = expectThrows(InvalidShapeException.class, () -> builder.close().buildS4J()); assertThat(e.getMessage(), containsString("interior cannot share more than one point with the exterior")); - e = expectThrows(InvalidShapeException.class, () -> builder.close().buildGeometry()); + e = expectThrows(IllegalArgumentException.class, () -> buildGeometry(builder.close())); assertThat(e.getMessage(), containsString("interior cannot share more than one point with the exterior")); } @@ -634,7 +633,7 @@ public void testBoundaryShapeWithTangentialHole() { .coordinate(172, 0) )); assertMultiPolygon(builder.close().buildS4J(), true); - assertMultiPolygon(builder.close().buildGeometry(), false); + assertMultiPolygon(buildGeometry(builder.close()), false); } public void testBoundaryShapeWithInvalidTangentialHole() { @@ -657,7 +656,7 @@ public void testBoundaryShapeWithInvalidTangentialHole() { Exception e; e = expectThrows(InvalidShapeException.class, () -> builder.close().buildS4J()); assertThat(e.getMessage(), containsString("interior cannot share more than one point with the exterior")); - e = expectThrows(InvalidShapeException.class, () -> builder.close().buildGeometry()); + e = expectThrows(IllegalArgumentException.class, () -> buildGeometry(builder.close())); assertThat(e.getMessage(), containsString("interior cannot share more than one point with the exterior")); } @@ -673,7 +672,7 @@ public void testBoundaryShape() { ); assertPolygon(builder.close().buildS4J(), true); - assertPolygon(builder.close().buildGeometry(), false); + assertPolygon(buildGeometry(builder.close()), false); } public void testShapeWithAlternateOrientation() { @@ -686,7 +685,7 @@ public void testShapeWithAlternateOrientation() { ); assertPolygon(builder.close().buildS4J(), true); - assertPolygon(builder.close().buildGeometry(), false); + assertPolygon(buildGeometry(builder.close()), false); // cw: geo core will convert to ccw across the dateline builder = new PolygonBuilder(new CoordinatesBuilder() @@ -697,7 +696,7 @@ public void testShapeWithAlternateOrientation() { ); assertMultiPolygon(builder.close().buildS4J(), true); - assertMultiPolygon(builder.close().buildGeometry(), false); + assertMultiPolygon(buildGeometry(builder.close()), false); } public void testInvalidShapeWithConsecutiveDuplicatePoints() { @@ -711,7 +710,7 @@ public void testInvalidShapeWithConsecutiveDuplicatePoints() { Exception e = expectThrows(InvalidShapeException.class, () -> builder.close().buildS4J()); assertThat(e.getMessage(), containsString("duplicate consecutive coordinates at: (")); - e = expectThrows(InvalidShapeException.class, () -> builder.close().buildGeometry()); + e = expectThrows(InvalidShapeException.class, () -> buildGeometry(builder.close())); assertThat(e.getMessage(), containsString("duplicate consecutive coordinates at: (")); } @@ -774,7 +773,11 @@ public void testInvalidSelfCrossingPolygon() { ); Exception e = expectThrows(InvalidShapeException.class, () -> builder.close().buildS4J()); assertThat(e.getMessage(), containsString("Self-intersection at or near point [")); - e = expectThrows(InvalidShapeException.class, () -> builder.close().buildGeometry()); + e = expectThrows(InvalidShapeException.class, () -> buildGeometry(builder.close())); assertThat(e.getMessage(), containsString("Self-intersection at or near point [")); } + + public Object buildGeometry(ShapeBuilder builder) { + return new GeometryIndexer(true).prepareForIndexing(builder.buildGeometry()); + } } diff --git 
a/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java index a5311ed157c62..64f136a327269 100644 --- a/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java @@ -198,7 +198,7 @@ public void testNoRelation() throws IOException { // see #3878 public void testThatXContentSerializationInsideOfArrayWorks() throws Exception { - EnvelopeBuilder envelopeBuilder = new EnvelopeBuilder(new Coordinate(0, 0), new Coordinate(10, 10)); + EnvelopeBuilder envelopeBuilder = new EnvelopeBuilder(new Coordinate(0, 10), new Coordinate(10, 0)); GeoShapeQueryBuilder geoQuery = QueryBuilders.geoShapeQuery("searchGeometry", envelopeBuilder); JsonXContent.contentBuilder().startArray().value(geoQuery).endArray(); } diff --git a/test/framework/src/main/java/org/elasticsearch/geo/GeometryTestUtils.java b/test/framework/src/main/java/org/elasticsearch/geo/GeometryTestUtils.java index 626f9b618d7b7..468d3bc0412f7 100644 --- a/test/framework/src/main/java/org/elasticsearch/geo/GeometryTestUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/geo/GeometryTestUtils.java @@ -161,19 +161,27 @@ private static GeometryCollection randomGeometryCollection(int level, int size = ESTestCase.randomIntBetween(1, 10); List shapes = new ArrayList<>(); for (int i = 0; i < size; i++) { - @SuppressWarnings("unchecked") Function geometry = ESTestCase.randomFrom( - GeometryTestUtils::randomCircle, - GeometryTestUtils::randomLine, - GeometryTestUtils::randomPoint, - GeometryTestUtils::randomPolygon, - GeometryTestUtils::randomMultiLine, - GeometryTestUtils::randomMultiPoint, - GeometryTestUtils::randomMultiPolygon, - hasAlt ? GeometryTestUtils::randomPoint : (b) -> randomRectangle(), - level < 3 ? (b) -> randomGeometryCollection(level + 1, b) : GeometryTestUtils::randomPoint // don't build too deep - ); - shapes.add(geometry.apply(hasAlt)); + shapes.add(randomGeometry(level, hasAlt)); } return new GeometryCollection<>(shapes); } + + public static Geometry randomGeometry(boolean hasAlt) { + return randomGeometry(0, hasAlt); + } + + private static Geometry randomGeometry(int level, boolean hasAlt) { + @SuppressWarnings("unchecked") Function geometry = ESTestCase.randomFrom( + GeometryTestUtils::randomCircle, + GeometryTestUtils::randomLine, + GeometryTestUtils::randomPoint, + GeometryTestUtils::randomPolygon, + GeometryTestUtils::randomMultiLine, + GeometryTestUtils::randomMultiPoint, + GeometryTestUtils::randomMultiPolygon, + hasAlt ? GeometryTestUtils::randomPoint : (b) -> randomRectangle(), + level < 3 ? 
(b) -> randomGeometryCollection(level + 1, b) : GeometryTestUtils::randomPoint // don't build too deep + ); + return geometry.apply(hasAlt); + } } From 8b905cc19c0f0eea212c4306557e825ebd11b62f Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Wed, 24 Jul 2019 11:09:06 -0700 Subject: [PATCH 07/51] [DOCS] Adds descriptions for put and start data frame transforms (#44724) --- .../data-frames/apis/put-transform.asciidoc | 9 ++++++++ .../data-frames/apis/start-transform.asciidoc | 23 ++++++++++++++----- 2 files changed, 26 insertions(+), 6 deletions(-) diff --git a/docs/reference/data-frames/apis/put-transform.asciidoc b/docs/reference/data-frames/apis/put-transform.asciidoc index 5d5fcb482818d..6e2d1a33391ad 100644 --- a/docs/reference/data-frames/apis/put-transform.asciidoc +++ b/docs/reference/data-frames/apis/put-transform.asciidoc @@ -31,6 +31,15 @@ information, see {stack-ov}/security-privileges.html[Security privileges] and [[put-data-frame-transform-desc]] ==== {api-description-title} +This API defines a {dataframe-transform}, which copies data from source indices, +transforms it, and persists it into an entity-centric destination index. The +entities are defined by the set of `group_by` fields in the `pivot` object. You +can also think of the destination index as a two-dimensional tabular data +structure (known as a {dataframe}). The ID for each document in the +{dataframe} is generated from a hash of the entity, so there is a unique row +per entity. For more information, see +{stack-ov}/ml-dataframes.html[{dataframe-transforms-cap}]. + When the {dataframe-transform} is created, a series of validations occur to ensure its success. For example, there is a check for the existence of the source indices and a check that the destination index is not part of the source diff --git a/docs/reference/data-frames/apis/start-transform.asciidoc b/docs/reference/data-frames/apis/start-transform.asciidoc index 90f70efb4dbe2..e82f037952698 100644 --- a/docs/reference/data-frames/apis/start-transform.asciidoc +++ b/docs/reference/data-frames/apis/start-transform.asciidoc @@ -30,12 +30,23 @@ have `view_index_metadata` privileges on the source index for the [[start-data-frame-transform-desc]] ==== {api-description-title} -When a {dataframe-transform} starts, a series of validations occur to ensure its -success. If you deferred validation when you created the {dataframe-transform}, -they occur when you start the transform--with the exception of privilege checks. -If the user who created the transform does not have the required privileges on -the source and destination indices, the transform starts but then fails when -it attempts the unauthorized operation. +When you start a {dataframe-transform}, it creates the destination index if it +does not already exist. The `number_of_shards` is set to `1` and the +`auto_expand_replicas` is set to `0-1`. + +The transform deduces the mapping definitions from the source indices. For +scripted fields, it uses <>. If a field in the +destination index is created by `scripted_metric` or `bucket_script` +aggregations, the transform uses dynamic mappings unless a template exists or +the destination index already exists. Mapping definitions in the destination +index take precedence over dynamic mappings and templates. + +When the {dataframe-transform} starts, a series of validations occur to ensure +its success. If you deferred validation when you created the +{dataframe-transform}, they occur when you start the transform--with the +exception of privilege checks. 
If the user who created the transform does not +have the required privileges on the source and destination indices, the +transform starts but then fails when it attempts the unauthorized operation. [[start-data-frame-transform-path-parms]] ==== {api-path-parms-title} From 1327b586dea8acf882132c19247e74e736b67a74 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 24 Jul 2019 11:20:28 -0700 Subject: [PATCH 08/51] Convert logging related gradle classes to java (#44771) This commit converts the logging related classes (only used for vagrant) to java from groovy. relates #34459 --- .../gradle/LoggingOutputStream.groovy | 64 ---------- .../gradle/vagrant/BatsOverVagrantTask.groovy | 5 +- .../vagrant/TapLoggerOutputStream.groovy | 111 ---------------- .../gradle/vagrant/VagrantCommandTask.groovy | 6 +- .../vagrant/VagrantLoggerOutputStream.groovy | 101 --------------- .../gradle/LoggingOutputStream.java | 78 ++++++++++++ .../gradle/vagrant/TapLoggerOutputStream.java | 118 ++++++++++++++++++ .../vagrant/VagrantLoggerOutputStream.java | 80 ++++++++++++ 8 files changed, 279 insertions(+), 284 deletions(-) delete mode 100644 buildSrc/src/main/groovy/org/elasticsearch/gradle/LoggingOutputStream.groovy delete mode 100644 buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy delete mode 100644 buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/LoggingOutputStream.java create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.java create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.java diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/LoggingOutputStream.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/LoggingOutputStream.groovy deleted file mode 100644 index e2e2b7c954482..0000000000000 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/LoggingOutputStream.groovy +++ /dev/null @@ -1,64 +0,0 @@ -package org.elasticsearch.gradle - -import org.gradle.api.logging.LogLevel -import org.gradle.api.logging.Logger - -/** - * Writes data passed to this stream as log messages. - * - * The stream will be flushed whenever a newline is detected. - * Allows setting an optional prefix before each line of output. 
- */ -public class LoggingOutputStream extends OutputStream { - - /** The starting length of the buffer */ - static final int DEFAULT_BUFFER_LENGTH = 4096 - - /** The buffer of bytes sent to the stream */ - byte[] buffer = new byte[DEFAULT_BUFFER_LENGTH] - - /** Offset of the start of unwritten data in the buffer */ - int start = 0 - - /** Offset of the end (semi-open) of unwritten data in the buffer */ - int end = 0 - - /** Logger to write stream data to */ - Logger logger - - /** Prefix to add before each line of output */ - String prefix = "" - - /** Log level to write log messages to */ - LogLevel level - - void write(final int b) throws IOException { - if (b == 0) return; - if (b == (int)'\n' as char) { - // always flush with newlines instead of adding to the buffer - flush() - return - } - - if (end == buffer.length) { - if (start != 0) { - // first try shifting the used buffer back to the beginning to make space - System.arraycopy(buffer, start, buffer, 0, end - start) - } else { - // need more space, extend the buffer - } - final int newBufferLength = buffer.length + DEFAULT_BUFFER_LENGTH; - final byte[] newBuffer = new byte[newBufferLength]; - System.arraycopy(buffer, 0, newBuffer, 0, buffer.length); - buffer = newBuffer; - } - - buffer[end++] = (byte) b; - } - - void flush() { - if (end == start) return - logger.log(level, prefix + new String(buffer, start, end - start)); - start = end - } -} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy index af5d328dc0cad..1d85d8584bb79 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy @@ -40,9 +40,6 @@ public class BatsOverVagrantTask extends VagrantCommandTask { @Override protected OutputStream createLoggerOutputStream() { - return new TapLoggerOutputStream( - command: commandLine.join(' '), - factory: getProgressLoggerFactory(), - logger: logger) + return new TapLoggerOutputStream(logger, getProgressLoggerFactory().newOperation(boxName).setDescription(boxName)); } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy deleted file mode 100644 index 0be294fb00523..0000000000000 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */
-package org.elasticsearch.gradle.vagrant
-
-import org.elasticsearch.gradle.LoggingOutputStream
-import org.gradle.api.GradleScriptException
-import org.gradle.api.logging.Logger
-import org.gradle.internal.logging.progress.ProgressLogger
-
-import java.util.regex.Matcher
-
-/**
- * Adapts an OutputStream containing output from bats into a ProgressLogger
- * and a Logger. Every test output goes to the ProgressLogger and all failures
- * and non-test output goes to the Logger. That means you can always glance
- * at the result of the last test and the cumulative pass/fail/skip stats and
- * the failures are all logged.
- *
- * There is a Tap4j project but we can't use it because it wants to parse the
- * entire TAP stream at once and won't parse it stream-wise.
- */
-public class TapLoggerOutputStream extends LoggingOutputStream {
-    private final ProgressLogger progressLogger
-    private boolean isStarted = false
-    private final Logger logger
-    private int testsCompleted = 0
-    private int testsFailed = 0
-    private int testsSkipped = 0
-    private Integer testCount
-    private String countsFormat
-
-    TapLoggerOutputStream(Map args) {
-        logger = args.logger
-        progressLogger = args.factory.newOperation(VagrantLoggerOutputStream)
-        progressLogger.setDescription("TAP output for `${args.command}`")
-    }
-
-    @Override
-    public void flush() {
-        if (isStarted == false) {
-            progressLogger.started()
-            isStarted = true
-        }
-        if (end == start) return
-        line(new String(buffer, start, end - start))
-        start = end
-    }
-
-    void line(String line) {
-        // System.out.print "===> $line\n"
-        if (testCount == null) {
-            try {
-                testCount = line.split('\\.').last().toInteger()
-                def length = (testCount as String).length()
-                countsFormat = "%0${length}d"
-                countsFormat = "[$countsFormat|$countsFormat|$countsFormat/$countsFormat]"
-                return
-            } catch (Exception e) {
-                throw new GradleScriptException(
-                    'Error parsing first line of TAP stream!!', e)
-            }
-        }
-        Matcher m = line =~ /(?<status>ok|not ok) \d+(?<skip> # skip (?<skipReason>\(.+\))?)? \[(?<suite>.+)\] (?<test>.+)/
-        if (!m.matches()) {
-            /* These might be failure report lines or comments or whatever. Its hard
-               to tell and it doesn't matter. 
*/ - logger.warn(line) - return - } - boolean skipped = m.group('skip') != null - boolean success = !skipped && m.group('status') == 'ok' - String skipReason = m.group('skipReason') - String suiteName = m.group('suite') - String testName = m.group('test') - - String status - if (skipped) { - status = "SKIPPED" - testsSkipped++ - } else if (success) { - status = " OK" - testsCompleted++ - } else { - status = " FAILED" - testsFailed++ - } - - String counts = sprintf(countsFormat, - [testsCompleted, testsFailed, testsSkipped, testCount]) - progressLogger.progress("Tests $counts, $status [$suiteName] $testName") - if (!success) { - logger.warn(line) - } - } -} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy index 161584938bde8..bcc612c7afacd 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy @@ -78,11 +78,9 @@ public class VagrantCommandTask extends LoggedExec { } protected OutputStream createLoggerOutputStream() { - return new VagrantLoggerOutputStream( - command: commandLine.join(' '), - factory: getProgressLoggerFactory(), + return new VagrantLoggerOutputStream(getProgressLoggerFactory().newOperation(boxName + " " + command).setDescription(boxName), /* Vagrant tends to output a lot of stuff, but most of the important stuff starts with ==> $box */ - squashedPrefix: "==> $boxName: ") + "==> $boxName: ") } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy deleted file mode 100644 index f3031f73c236d..0000000000000 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.gradle.vagrant - -import org.elasticsearch.gradle.LoggingOutputStream -import org.gradle.internal.logging.progress.ProgressLogger - -/** - * Adapts an OutputStream being written to by vagrant into a ProcessLogger. It - * has three hacks to make the output nice: - * - * 1. Attempt to filter out the "unimportant" output from vagrant. Usually - * vagrant prefixes its more important output with "==> $boxname: ". The stuff - * that isn't prefixed that way can just be thrown out. - * - * 2. It also attempts to detect when vagrant does tricks assuming its writing - * to a terminal emulator and renders the output more like gradle users expect. 
- * This means that progress indicators for things like box downloading work and - * box importing look pretty good. - * - * 3. It catches lines that look like "==> $boxName ==> Heading text" and stores - * the text after the second arrow as a "heading" for use in annotating - * provisioning. It does this because provisioning can spit out _lots_ of text - * and its very easy to lose context when there isn't a scrollback. So we've - * sprinkled `echo "==> Heading text"` into the provisioning scripts for this - * to catch so it can render the output like - * "Heading text > stdout from the provisioner". - */ -public class VagrantLoggerOutputStream extends LoggingOutputStream { - private static final String HEADING_PREFIX = '==> ' - - private final ProgressLogger progressLogger - private boolean isStarted = false - private String squashedPrefix - private String lastLine = '' - private boolean inProgressReport = false - private String heading = '' - - VagrantLoggerOutputStream(Map args) { - progressLogger = args.factory.newOperation(VagrantLoggerOutputStream) - progressLogger.setDescription("Vagrant output for `$args.command`") - squashedPrefix = args.squashedPrefix - } - - @Override - public void flush() { - if (isStarted == false) { - progressLogger.started() - isStarted = true - } - if (end == start) return - line(new String(buffer, start, end - start)) - start = end - } - - void line(String line) { - if (line.startsWith('\r\u001b')) { - /* We don't want to try to be a full terminal emulator but we want to - keep the escape sequences from leaking and catch _some_ of the - meaning. */ - line = line.substring(2) - if ('[K' == line) { - inProgressReport = true - } - return - } - if (line.startsWith(squashedPrefix)) { - line = line.substring(squashedPrefix.length()) - inProgressReport = false - lastLine = line - if (line.startsWith(HEADING_PREFIX)) { - line = line.substring(HEADING_PREFIX.length()) - heading = line + ' > ' - } else { - line = heading + line - } - } else if (inProgressReport) { - inProgressReport = false - line = lastLine + line - } else { - return - } - progressLogger.progress(line) - } -} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/LoggingOutputStream.java b/buildSrc/src/main/java/org/elasticsearch/gradle/LoggingOutputStream.java new file mode 100644 index 0000000000000..8a1dfe16de28f --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/LoggingOutputStream.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gradle; + +import java.io.IOException; +import java.io.OutputStream; +import java.util.Arrays; + +/** + * Writes data passed to this stream as log messages. + * + * The stream will be flushed whenever a newline is detected. 
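+ * Subclasses decide what happens to each completed line by implementing {@link #logLine(String)}.
+ *
+ * <p>A minimal usage sketch (an illustrative editor's note, not part of the original change):
+ * <pre>{@code
+ * LoggingOutputStream out = new LoggingOutputStream() {
+ *     protected void logLine(String line) {
+ *         System.out.println("vagrant> " + line); // handle one completed line
+ *     }
+ * };
+ * out.write("hello\nworld".getBytes(StandardCharsets.UTF_8)); // the '\n' flushes "hello" as one line
+ * out.flush();                                                // flushes the buffered "world"
+ * }</pre>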
+ */
+public abstract class LoggingOutputStream extends OutputStream {
+
+    /** The starting length of the buffer */
+    private static final int DEFAULT_BUFFER_LENGTH = 4096;
+
+    /** The buffer of bytes sent to the stream */
+    private byte[] buffer = new byte[DEFAULT_BUFFER_LENGTH];
+
+    /** Offset of the start of unwritten data in the buffer */
+    private int start = 0;
+
+    /** Offset of the end (semi-open) of unwritten data in the buffer */
+    private int end = 0;
+
+    @Override
+    public void write(final int b) throws IOException {
+        if (b == 0) return;
+        if (b == '\n') {
+            // always flush with newlines instead of adding to the buffer
+            flush();
+            return;
+        }
+
+        if (end == buffer.length) {
+            if (start != 0) {
+                // first try shifting the used buffer back to the beginning to make space
+                int len = end - start;
+                System.arraycopy(buffer, start, buffer, 0, len);
+                start = 0;
+                end = len;
+            } else {
+                // otherwise extend the buffer
+                buffer = Arrays.copyOf(buffer, buffer.length + DEFAULT_BUFFER_LENGTH);
+            }
+        }
+
+        buffer[end++] = (byte) b;
+    }
+
+    @Override
+    public void flush() {
+        if (end == start) return;
+        logLine(new String(buffer, start, end - start));
+        start = end;
+    }
+
+    protected abstract void logLine(String line);
+}
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.java b/buildSrc/src/main/java/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.java
new file mode 100644
index 0000000000000..353b2687ad189
--- /dev/null
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gradle.vagrant;
+
+import org.elasticsearch.gradle.LoggingOutputStream;
+import org.gradle.api.GradleScriptException;
+import org.gradle.api.logging.Logger;
+import org.gradle.internal.logging.progress.ProgressLogger;
+
+import java.util.Formatter;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * Adapts an OutputStream containing TAP output from bats into a ProgressLogger and a Logger.
+ *
+ * TAP (Test Anything Protocol, https://testanything.org) is used by BATS for its output format.
+ *
+ * Every test output goes to the ProgressLogger and all failures
+ * and non-test output goes to the Logger. That means you can always glance
+ * at the result of the last test and the cumulative pass/fail/skip stats and
+ * the failures are all logged.
+ *
+ * There is a Tap4j project but we can't use it because it wants to parse the
+ * entire TAP stream at once and won't parse it stream-wise.
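+ *
+ * For orientation, the TAP input this class parses looks roughly like the following
+ * (an illustrative sample by the editor, not captured output):
+ * <pre>
+ * 1..3
+ * ok 1 [packaging] install succeeds
+ * not ok 2 [packaging] remove succeeds
+ * ok 3 # skip (needs systemd) [packaging] restart succeeds
+ * </pre>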
 + */
+public class TapLoggerOutputStream extends LoggingOutputStream {
+
+    private static final Pattern lineRegex =
+        Pattern.compile("(?<status>ok|not ok) \\d+(?<skip> # skip (?<skipReason>\\(.+\\))?)? \\[(?<suite>.+)\\] (?<test>.+)");
+
+    private final Logger logger;
+    private final ProgressLogger progressLogger;
+    private boolean isStarted = false;
+    private int testsCompleted = 0;
+    private int testsFailed = 0;
+    private int testsSkipped = 0;
+    private Integer testCount;
+    private String countsFormat;
+
+    TapLoggerOutputStream(Logger logger, ProgressLogger progressLogger) {
+        this.logger = logger;
+        this.progressLogger = progressLogger;
+    }
+
+    @Override
+    public void logLine(String line) {
+        if (isStarted == false) {
+            progressLogger.started("started");
+            isStarted = true;
+        }
+        if (testCount == null) {
+            try {
+                int lastDot = line.lastIndexOf('.');
+                testCount = Integer.parseInt(line.substring(lastDot + 1));
+                int length = String.valueOf(testCount).length();
+                String count = "%0" + length + "d";
+                countsFormat = "[" + count + "|" + count + "|" + count + "/" + count + "]";
+                return;
+            } catch (Exception e) {
+                throw new GradleScriptException("Error parsing first line of TAP stream!!", e);
+            }
+        }
+        Matcher m = lineRegex.matcher(line);
+        if (m.matches() == false) {
+            /* These might be failure report lines or comments or whatever. It's hard
+               to tell and it doesn't matter. */
+            logger.warn(line);
+            return;
+        }
+        boolean skipped = m.group("skip") != null;
+        boolean success = skipped == false && m.group("status").equals("ok");
+        String skipReason = m.group("skipReason");
+        String suiteName = m.group("suite");
+        String testName = m.group("test");
+
+        final String status;
+        if (skipped) {
+            status = "SKIPPED";
+            testsSkipped++;
+        } else if (success) {
+            status = "     OK";
+            testsCompleted++;
+        } else {
+            status = " FAILED";
+            testsFailed++;
+        }
+
+        String counts = new Formatter().format(countsFormat, testsCompleted, testsFailed, testsSkipped, testCount).out().toString();
+        progressLogger.progress("BATS " + counts + ", " + status + " [" + suiteName + "] " + testName);
+        if (success == false) {
+            logger.warn(line);
+        }
+    }
+
+    @Override
+    public void close() {
+        flush();
+        progressLogger.completed();
+    }
+}
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.java b/buildSrc/src/main/java/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.java
new file mode 100644
index 0000000000000..2e4a612355637
--- /dev/null
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.elasticsearch.gradle.vagrant; + +import org.elasticsearch.gradle.LoggingOutputStream; +import org.gradle.internal.logging.progress.ProgressLogger; + +public class VagrantLoggerOutputStream extends LoggingOutputStream { + private static final String HEADING_PREFIX = "==> "; + + private final ProgressLogger progressLogger; + private final String squashedPrefix; + private boolean isStarted = false; + private String lastLine = ""; + private boolean inProgressReport = false; + private String heading = ""; + + VagrantLoggerOutputStream(ProgressLogger progressLogger, String squashedPrefix) { + this.progressLogger = progressLogger; + this.squashedPrefix = squashedPrefix; + } + + @Override + protected void logLine(String line) { + if (isStarted == false) { + progressLogger.started("started"); + isStarted = true; + } + if (line.startsWith("\r\u001b")) { + /* We don't want to try to be a full terminal emulator but we want to + keep the escape sequences from leaking and catch _some_ of the + meaning. */ + line = line.substring(2); + if ("[K".equals(line)) { + inProgressReport = true; + } + return; + } + if (line.startsWith(squashedPrefix)) { + line = line.substring(squashedPrefix.length()); + inProgressReport = false; + lastLine = line; + if (line.startsWith(HEADING_PREFIX)) { + line = line.substring(HEADING_PREFIX.length()); + heading = line + " > "; + } else { + line = heading + line; + } + } else if (inProgressReport) { + inProgressReport = false; + line = lastLine + line; + } else { + return; + } + progressLogger.progress(line); + } + + @Override + public void close() { + flush(); + progressLogger.completed(); + } +} From 0482894e578e9f1e75998c8338a92c3fb339e910 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Wed, 24 Jul 2019 15:41:31 -0500 Subject: [PATCH 09/51] [Geo] Refactor GeoShapeQueryBuilder to derive from AbstractGeometryQueryBuilder (#44780) Refactors GeoShapeQueryBuilder to derive from a new AbstractGeometryQueryBuilder that provides common parsing and build logic for spatial geometries. This will allow development of custom geometry queries by extending AbstractGeometryQueryBuilder preventing duplication of common spatial query logic. --- .../query/AbstractGeometryQueryBuilder.java | 631 ++++++++++++ .../index/query/GeoShapeQueryBuilder.java | 896 +++++------------- 2 files changed, 890 insertions(+), 637 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/index/query/AbstractGeometryQueryBuilder.java diff --git a/server/src/main/java/org/elasticsearch/index/query/AbstractGeometryQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/AbstractGeometryQueryBuilder.java new file mode 100644 index 0000000000000..2edbae206506e --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/query/AbstractGeometryQueryBuilder.java @@ -0,0 +1,631 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.MatchNoDocsQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.SetOnce;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.get.GetRequest;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.ParsingException;
+import org.elasticsearch.common.geo.GeoJson;
+import org.elasticsearch.common.geo.GeometryIO;
+import org.elasticsearch.common.geo.GeometryParser;
+import org.elasticsearch.common.geo.ShapeRelation;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.geo.geometry.Geometry;
+import org.elasticsearch.index.mapper.MappedFieldType;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+import java.util.function.Supplier;
+
+/**
+ * Base {@link QueryBuilder} that builds a Geometry Query
+ */
+public abstract class AbstractGeometryQueryBuilder<QB extends AbstractGeometryQueryBuilder<QB>> extends AbstractQueryBuilder<QB> {
+
+    static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Types are deprecated in [geo_shape] queries. " +
+        "The type should no longer be specified in the [indexed_shape] section.";
+
+    public static final String DEFAULT_SHAPE_INDEX_NAME = "shapes";
+    public static final String DEFAULT_SHAPE_FIELD_NAME = "shape";
+    public static final ShapeRelation DEFAULT_SHAPE_RELATION = ShapeRelation.INTERSECTS;
+
+    /** registry of content types this query can be used with */
+    protected final List<String> validContentTypes = new ArrayList<>(validContentTypes());
+
+    /** The default value for ignore_unmapped. */
+    public static final boolean DEFAULT_IGNORE_UNMAPPED = false;
+
+    protected static final ParseField SHAPE_FIELD = new ParseField("shape");
+    protected static final ParseField RELATION_FIELD = new ParseField("relation");
+    protected static final ParseField INDEXED_SHAPE_FIELD = new ParseField("indexed_shape");
+    protected static final ParseField SHAPE_ID_FIELD = new ParseField("id");
+    protected static final ParseField SHAPE_TYPE_FIELD = new ParseField("type");
+    protected static final ParseField SHAPE_INDEX_FIELD = new ParseField("index");
+    protected static final ParseField SHAPE_PATH_FIELD = new ParseField("path");
+    protected static final ParseField SHAPE_ROUTING_FIELD = new ParseField("routing");
+    protected static final ParseField IGNORE_UNMAPPED_FIELD = new ParseField("ignore_unmapped");
+
+    protected final String fieldName;
+    protected final Supplier<Geometry> supplier;
+
+    protected final String indexedShapeId;
+    protected final String indexedShapeType;
+
+    protected Geometry shape;
+    protected String indexedShapeIndex = DEFAULT_SHAPE_INDEX_NAME;
+    protected String indexedShapePath = DEFAULT_SHAPE_FIELD_NAME;
+    protected String indexedShapeRouting;
+
+    protected ShapeRelation relation = DEFAULT_SHAPE_RELATION;
+
+    protected boolean ignoreUnmapped = DEFAULT_IGNORE_UNMAPPED;
+
+    /**
+     * Creates a new ShapeQueryBuilder whose Query will be against the given
+     * field name using the given Shape
+     *
+     * @param fieldName
+     *            Name of the field that will be queried
+     * @param shape
+     *            Shape used in the Query
+     * @deprecated use {@link #AbstractGeometryQueryBuilder(String, Geometry)} instead
+     */
+    @Deprecated
+    protected AbstractGeometryQueryBuilder(String fieldName, ShapeBuilder shape) {
+        this(fieldName, shape == null ? null : shape.buildGeometry(), null, null);
+    }
+
+    /**
+     * Creates a new AbstractGeometryQueryBuilder whose Query will be against the given
+     * field name using the given Shape
+     *
+     * @param fieldName
+     *            Name of the field that will be queried
+     * @param shape
+     *            Shape used in the Query
+     */
+    public AbstractGeometryQueryBuilder(String fieldName, Geometry shape) {
+        this(fieldName, shape, null, null);
+    }
+
+    /**
+     * Creates a new ShapeQueryBuilder whose Query will be against the given
+     * field name and will use the Shape found with the given ID
+     *
+     * @param fieldName
+     *            Name of the field that will be filtered
+     * @param indexedShapeId
+     *            ID of the indexed Shape that will be used in the Query
+     */
+    protected AbstractGeometryQueryBuilder(String fieldName, String indexedShapeId) {
+        this(fieldName, (Geometry) null, indexedShapeId, null);
+    }
+
+    /**
+     * Creates a new AbstractGeometryQueryBuilder whose Query will be against the given
+     * field name and will use the Shape found with the given ID in the given
+     * type
+     *
+     * @param fieldName
+     *            Name of the field that will be filtered
+     * @param indexedShapeId
+     *            ID of the indexed Shape that will be used in the Query
+     * @param indexedShapeType
+     *            Index type of the indexed Shapes
+     * @deprecated use {@link #AbstractGeometryQueryBuilder(String, String)} instead
+     */
+    @Deprecated
+    protected AbstractGeometryQueryBuilder(String fieldName, String indexedShapeId, String indexedShapeType) {
+        this(fieldName, (Geometry) null, indexedShapeId, indexedShapeType);
+    }
+
+    protected AbstractGeometryQueryBuilder(String fieldName, Geometry shape, String indexedShapeId, @Nullable String indexedShapeType) {
+        if (fieldName == null) {
+            throw new IllegalArgumentException("fieldName is required");
+        }
+        if (shape == null && indexedShapeId == null) {
+            throw new IllegalArgumentException("either shape or indexedShapeId is required");
+        }
+        this.fieldName = fieldName;
+        this.shape = shape;
+        this.indexedShapeId = indexedShapeId;
+        this.indexedShapeType = indexedShapeType;
+        this.supplier = null;
+    }
+
+    protected AbstractGeometryQueryBuilder(String fieldName, Supplier<Geometry> supplier, String indexedShapeId,
+                                           @Nullable String indexedShapeType) {
+        this.fieldName = fieldName;
+        this.shape = null;
+        this.supplier = supplier;
+        this.indexedShapeId = indexedShapeId;
+        this.indexedShapeType = indexedShapeType;
+    }
+
+    /**
+     * Read from a stream.
+     */
+    protected AbstractGeometryQueryBuilder(StreamInput in) throws IOException {
+        super(in);
+        fieldName = in.readString();
+        if (in.readBoolean()) {
+            shape = GeometryIO.readGeometry(in);
+            indexedShapeId = null;
+            indexedShapeType = null;
+        } else {
+            shape = null;
+            indexedShapeId = in.readOptionalString();
+            indexedShapeType = in.readOptionalString();
+            indexedShapeIndex = in.readOptionalString();
+            indexedShapePath = in.readOptionalString();
+            indexedShapeRouting = in.readOptionalString();
+        }
+        relation = ShapeRelation.readFromStream(in);
+        ignoreUnmapped = in.readBoolean();
+        supplier = null;
+    }
+
+    @Override
+    protected void doWriteTo(StreamOutput out) throws IOException {
+        if (supplier != null) {
+            throw new IllegalStateException("supplier must be null, can't serialize suppliers, missing a rewriteAndFetch?");
+        }
+        out.writeString(fieldName);
+        boolean hasShape = shape != null;
+        out.writeBoolean(hasShape);
+        if (hasShape) {
+            GeometryIO.writeGeometry(out, shape);
+        } else {
+            out.writeOptionalString(indexedShapeId);
+            out.writeOptionalString(indexedShapeType);
+            out.writeOptionalString(indexedShapeIndex);
+            out.writeOptionalString(indexedShapePath);
+            out.writeOptionalString(indexedShapeRouting);
+        }
+        relation.writeTo(out);
+        out.writeBoolean(ignoreUnmapped);
+    }
+
+    /**
+     * @return the name of the field that will be queried
+     */
+    public String fieldName() {
+        return fieldName;
+    }
+
+    /**
+     * Sets the geometry for the query shape.
+     *
+     * @param geometry the geometry
+     * @return this
+     */
+    public QB shape(Geometry geometry) {
+        if (geometry == null) {
+            throw new IllegalArgumentException("No geometry defined");
+        }
+        this.shape = geometry;
+        return (QB) this;
+    }
+
+    /**
+     * @return the shape used in the Query
+     */
+    public Geometry shape() {
+        return shape;
+    }
+
+    /**
+     * @return the ID of the indexed Shape that will be used in the Query
+     */
+    public String indexedShapeId() {
+        return indexedShapeId;
+    }
+
+    /**
+     * @return the document type of the indexed Shape that will be used in the
+     *         Query
+     *
+     * @deprecated Types are in the process of being removed.
+ */ + @Deprecated + public String indexedShapeType() { + return indexedShapeType; + } + + /** + * Sets the name of the index where the indexed Shape can be found + * + * @param indexedShapeIndex Name of the index where the indexed Shape is + * @return this + */ + public QB indexedShapeIndex(String indexedShapeIndex) { + this.indexedShapeIndex = indexedShapeIndex; + return (QB)this; + } + + /** + * @return the index name for the indexed Shape that will be used in the + * Query + */ + public String indexedShapeIndex() { + return indexedShapeIndex; + } + + /** + * Sets the path of the field in the indexed Shape document that has the Shape itself + * + * @param indexedShapePath Path of the field where the Shape itself is defined + * @return this + */ + public QB indexedShapePath(String indexedShapePath) { + this.indexedShapePath = indexedShapePath; + return (QB)this; + } + + /** + * @return the path of the indexed Shape that will be used in the Query + */ + public String indexedShapePath() { + return indexedShapePath; + } + + /** + * Sets the optional routing to the indexed Shape that will be used in the query + * + * @param indexedShapeRouting indexed shape routing + * @return this + */ + public QB indexedShapeRouting(String indexedShapeRouting) { + this.indexedShapeRouting = indexedShapeRouting; + return (QB)this; + } + + + /** + * @return the optional routing to the indexed Shape that will be used in the + * Query + */ + public String indexedShapeRouting() { + return indexedShapeRouting; + } + + /** + * Sets the relation of query shape and indexed shape. + * + * @param relation relation of the shapes + * @return this + */ + public QB relation(ShapeRelation relation) { + if (relation == null) { + throw new IllegalArgumentException("No Shape Relation defined"); + } + this.relation = relation; + return (QB)this; + } + + /** + * @return the relation of query shape and indexed shape to use in the Query + */ + public ShapeRelation relation() { + return relation; + } + + /** + * Sets whether the query builder should ignore unmapped fields (and run a + * {@link MatchNoDocsQuery} in place of this query) or throw an exception if + * the field is unmapped. + */ + public AbstractGeometryQueryBuilder ignoreUnmapped(boolean ignoreUnmapped) { + this.ignoreUnmapped = ignoreUnmapped; + return this; + } + + /** + * Gets whether the query builder will ignore unmapped fields (and run a + * {@link MatchNoDocsQuery} in place of this query) or throw an exception if + * the field is unmapped. 
+     */
+    public boolean ignoreUnmapped() {
+        return ignoreUnmapped;
+    }
+
+    /** list of content types this query can be used with */
+    protected abstract List<String> validContentTypes();
+    /** builds the appropriate lucene shape query */
+    protected abstract Query buildShapeQuery(QueryShardContext context, MappedFieldType fieldType);
+    /** returns expected content type for this query */
+    protected abstract String queryFieldType();
+    /** writes the xcontent specific to this shape query */
+    protected abstract void doShapeQueryXContent(XContentBuilder builder, Params params) throws IOException;
+    /** creates a new ShapeQueryBuilder from the provided field name and shape builder */
+    protected abstract AbstractGeometryQueryBuilder<QB> newShapeQueryBuilder(String fieldName, Geometry shape);
+    /** creates a new ShapeQueryBuilder from the provided field name, supplier, indexed shape id, and indexed shape type */
+    protected abstract AbstractGeometryQueryBuilder<QB> newShapeQueryBuilder(String fieldName, Supplier<Geometry> shapeSupplier,
+                                                                             String indexedShapeId, String indexedShapeType);
+
+    /** returns true if the provided field type is valid for this query */
+    protected boolean isValidContentType(String typeName) {
+        return validContentTypes.contains(typeName);
+    }
+
+    @Override
+    protected Query doToQuery(QueryShardContext context) {
+        if (shape == null || supplier != null) {
+            throw new UnsupportedOperationException("query must be rewritten first");
+        }
+        final MappedFieldType fieldType = context.fieldMapper(fieldName);
+        if (fieldType == null) {
+            if (ignoreUnmapped) {
+                return new MatchNoDocsQuery();
+            } else {
+                throw new QueryShardException(context, "failed to find " + queryFieldType() + " field [" + fieldName + "]");
+            }
+        }
+
+        return buildShapeQuery(context, fieldType);
+    }
+
+    /**
+     * Fetches the Shape with the given ID in the given type and index.
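+     *
+     * In REST terms this is roughly the following lookup (illustrative values;
+     * the index defaults to "shapes" and the path to "shape"):
+     *
+     * <pre>
+     * GET /shapes/_doc/deu   =&gt;   { "shape" : { "type" : "envelope", "coordinates" : [ ... ] } }
+     * </pre>
+     *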
+ * + * @param getRequest + * GetRequest containing index, type and id + * @param path + * Name or path of the field in the Shape Document where the + * Shape itself is located + */ + private void fetch(Client client, GetRequest getRequest, String path, ActionListener listener) { + getRequest.preference("_local"); + client.get(getRequest, new ActionListener(){ + + @Override + public void onResponse(GetResponse response) { + try { + if (!response.isExists()) { + throw new IllegalArgumentException("Shape with ID [" + getRequest.id() + "] in type [" + getRequest.type() + + "] not found"); + } + if (response.isSourceEmpty()) { + throw new IllegalArgumentException("Shape with ID [" + getRequest.id() + "] in type [" + getRequest.type() + + "] source disabled"); + } + + String[] pathElements = path.split("\\."); + int currentPathSlot = 0; + + // It is safe to use EMPTY here because this never uses namedObject + try (XContentParser parser = XContentHelper + .createParser(NamedXContentRegistry.EMPTY, + LoggingDeprecationHandler.INSTANCE, response.getSourceAsBytesRef())) { + XContentParser.Token currentToken; + while ((currentToken = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (currentToken == XContentParser.Token.FIELD_NAME) { + if (pathElements[currentPathSlot].equals(parser.currentName())) { + parser.nextToken(); + if (++currentPathSlot == pathElements.length) { + listener.onResponse(new GeometryParser(true, true, true).parse(parser)); + return; + } + } else { + parser.nextToken(); + parser.skipChildren(); + } + } + } + throw new IllegalStateException("Shape with name [" + getRequest.id() + "] found but missing " + path + " field"); + } + } catch (Exception e) { + onFailure(e); + } + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + } + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(getWriteableName()); + + builder.startObject(fieldName); + + if (shape != null) { + builder.field(SHAPE_FIELD.getPreferredName()); + GeoJson.toXContent(shape, builder, params); + } else { + builder.startObject(INDEXED_SHAPE_FIELD.getPreferredName()) + .field(SHAPE_ID_FIELD.getPreferredName(), indexedShapeId); + if (indexedShapeType != null) { + builder.field(SHAPE_TYPE_FIELD.getPreferredName(), indexedShapeType); + } + if (indexedShapeIndex != null) { + builder.field(SHAPE_INDEX_FIELD.getPreferredName(), indexedShapeIndex); + } + if (indexedShapePath != null) { + builder.field(SHAPE_PATH_FIELD.getPreferredName(), indexedShapePath); + } + if (indexedShapeRouting != null) { + builder.field(SHAPE_ROUTING_FIELD.getPreferredName(), indexedShapeRouting); + } + builder.endObject(); + } + + if(relation != null) { + builder.field(RELATION_FIELD.getPreferredName(), relation.getRelationName()); + } + + doShapeQueryXContent(builder, params); + builder.endObject(); + builder.field(IGNORE_UNMAPPED_FIELD.getPreferredName(), ignoreUnmapped); + + printBoostAndQueryName(builder); + + builder.endObject(); + } + + @Override + protected boolean doEquals(AbstractGeometryQueryBuilder other) { + return Objects.equals(fieldName, other.fieldName) + && Objects.equals(indexedShapeId, other.indexedShapeId) + && Objects.equals(indexedShapeIndex, other.indexedShapeIndex) + && Objects.equals(indexedShapePath, other.indexedShapePath) + && Objects.equals(indexedShapeType, other.indexedShapeType) + && Objects.equals(indexedShapeRouting, other.indexedShapeRouting) + && Objects.equals(relation, other.relation) + && 
Objects.equals(shape, other.shape) + && Objects.equals(supplier, other.supplier) + && Objects.equals(ignoreUnmapped, other.ignoreUnmapped); + } + + @Override + protected int doHashCode() { + return Objects.hash(fieldName, indexedShapeId, indexedShapeIndex, + indexedShapePath, indexedShapeType, indexedShapeRouting, relation, shape, ignoreUnmapped, supplier); + } + + @Override + protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException { + if (supplier != null) { + return supplier.get() == null ? this : newShapeQueryBuilder(this.fieldName, supplier.get()).relation(relation); + } else if (this.shape == null) { + SetOnce supplier = new SetOnce<>(); + queryRewriteContext.registerAsyncAction((client, listener) -> { + GetRequest getRequest; + if (indexedShapeType == null) { + getRequest = new GetRequest(indexedShapeIndex, indexedShapeId); + } else { + getRequest = new GetRequest(indexedShapeIndex, indexedShapeType, indexedShapeId); + } + getRequest.routing(indexedShapeRouting); + fetch(client, getRequest, indexedShapePath, ActionListener.wrap(builder-> { + supplier.set(builder); + listener.onResponse(null); + }, listener::onFailure)); + }); + return newShapeQueryBuilder(this.fieldName, supplier::get, this.indexedShapeId, this.indexedShapeType).relation(relation); + } + return this; + } + + /** local class that encapsulates xcontent parsed shape parameters */ + protected abstract static class ParsedShapeQueryParams { + public String fieldName; + public ShapeRelation relation; + public ShapeBuilder shape; + + public String id = null; + public String type = null; + public String index = null; + public String shapePath = null; + public String shapeRouting = null; + + public float boost; + public String queryName; + public boolean ignoreUnmapped; + + protected abstract boolean parseXContentField(XContentParser parser) throws IOException; + } + + public static ParsedShapeQueryParams parsedParamsFromXContent(XContentParser parser, ParsedShapeQueryParams params) + throws IOException { + String fieldName = null; + XContentParser.Token token; + String currentFieldName = null; + + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + if (fieldName != null) { + throw new ParsingException(parser.getTokenLocation(), "point specified twice. 
[" + currentFieldName + "]"); + } + fieldName = currentFieldName; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + token = parser.nextToken(); + if (RELATION_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + params.relation = ShapeRelation.getRelationByName(parser.text()); + if (params.relation == null) { + throw new ParsingException(parser.getTokenLocation(), "Unknown shape operation [" + parser.text() + " ]"); + } + } else if (params.parseXContentField(parser)) { + continue; + } else if (INDEXED_SHAPE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (SHAPE_ID_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + params.id = parser.text(); + } else if (SHAPE_TYPE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + params.type = parser.text(); + } else if (SHAPE_INDEX_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + params.index = parser.text(); + } else if (SHAPE_PATH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + params.shapePath = parser.text(); + } else if (SHAPE_ROUTING_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + params.shapeRouting = parser.text(); + } + } else { + throw new ParsingException(parser.getTokenLocation(), "unknown token [" + token + + "] after [" + currentFieldName + "]"); + } + } + } else { + throw new ParsingException(parser.getTokenLocation(), "query does not support [" + currentFieldName + "]"); + } + } + } + } else if (token.isValue()) { + if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + params.boost = parser.floatValue(); + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + params.queryName = parser.text(); + } else if (IGNORE_UNMAPPED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + params.ignoreUnmapped = parser.booleanValue(); + } else { + throw new ParsingException(parser.getTokenLocation(), "query does not support [" + currentFieldName + "]"); + } + } + } + params.fieldName = fieldName; + return params; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java index 654aaf9751ca0..f9bb9680bc777 100644 --- a/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java @@ -32,19 +32,11 @@ import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy; import org.apache.lucene.spatial.query.SpatialArgs; import org.apache.lucene.spatial.query.SpatialOperation; -import org.apache.lucene.util.SetOnce; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.get.GetRequest; -import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.client.Client; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.geo.GeoJson; import org.elasticsearch.common.geo.GeoShapeType; -import org.elasticsearch.common.geo.GeometryIO; import 
org.elasticsearch.common.geo.GeometryIndexer;
-import org.elasticsearch.common.geo.GeometryParser;
 import org.elasticsearch.common.geo.ShapeRelation;
 import org.elasticsearch.common.geo.SpatialStrategy;
 import org.elasticsearch.common.geo.builders.EnvelopeBuilder;
@@ -60,10 +52,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.logging.DeprecationLogger;
-import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
-import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.geo.geometry.Circle;
 import org.elasticsearch.geo.geometry.Geometry;
@@ -76,6 +65,7 @@ import org.elasticsearch.geo.geometry.Point;
 import org.elasticsearch.geo.geometry.Rectangle;
 import org.elasticsearch.index.mapper.BaseGeoShapeFieldMapper;
+import org.elasticsearch.index.mapper.GeoShapeFieldMapper;
 import org.elasticsearch.index.mapper.LegacyGeoShapeFieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.locationtech.jts.geom.Coordinate;
@@ -83,6 +73,7 @@ import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.Objects;
 import java.util.function.Supplier;
@@ -90,54 +81,17 @@ import static org.elasticsearch.index.mapper.GeoShapeFieldMapper.toLucenePolygon;
 
 /**
- * {@link QueryBuilder} that builds a GeoShape Query
+ * Derived {@link AbstractGeometryQueryBuilder} that builds a lat, lon GeoShape Query
  */
-public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuilder> {
+public class GeoShapeQueryBuilder extends AbstractGeometryQueryBuilder<GeoShapeQueryBuilder> {
 
     public static final String NAME = "geo_shape";
 
     private static final DeprecationLogger deprecationLogger = new DeprecationLogger(
         LogManager.getLogger(GeoShapeQueryBuilder.class));
 
-    static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Types are deprecated in [geo_shape] queries. " +
-        "The type should no longer be specified in the [indexed_shape] section.";
-
-    public static final String DEFAULT_SHAPE_INDEX_NAME = "shapes";
-    public static final String DEFAULT_SHAPE_FIELD_NAME = "shape";
-    public static final ShapeRelation DEFAULT_SHAPE_RELATION = ShapeRelation.INTERSECTS;
-
-    /**
-     * The default value for ignore_unmapped.
- */ - public static final boolean DEFAULT_IGNORE_UNMAPPED = false; - - private static final ParseField SHAPE_FIELD = new ParseField("shape"); - private static final ParseField STRATEGY_FIELD = new ParseField("strategy"); - private static final ParseField RELATION_FIELD = new ParseField("relation"); - private static final ParseField INDEXED_SHAPE_FIELD = new ParseField("indexed_shape"); - private static final ParseField SHAPE_ID_FIELD = new ParseField("id"); - private static final ParseField SHAPE_TYPE_FIELD = new ParseField("type"); - private static final ParseField SHAPE_INDEX_FIELD = new ParseField("index"); - private static final ParseField SHAPE_PATH_FIELD = new ParseField("path"); - private static final ParseField SHAPE_ROUTING_FIELD = new ParseField("routing"); - private static final ParseField IGNORE_UNMAPPED_FIELD = new ParseField("ignore_unmapped"); - - private final String fieldName; - - private final Geometry shape; - private final Supplier supplier; + protected static final ParseField STRATEGY_FIELD = new ParseField("strategy"); private SpatialStrategy strategy; - private final String indexedShapeId; - private final String indexedShapeType; - - - private String indexedShapeIndex = DEFAULT_SHAPE_INDEX_NAME; - private String indexedShapePath = DEFAULT_SHAPE_FIELD_NAME; - private String indexedShapeRouting; - - private ShapeRelation relation = DEFAULT_SHAPE_RELATION; - - private boolean ignoreUnmapped = DEFAULT_IGNORE_UNMAPPED; - /** * Creates a new GeoShapeQueryBuilder whose Query will be against the given * field name using the given Shape @@ -146,12 +100,9 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder shapeSupplier, String indexedShapeId, + @Nullable String indexedShapeType) { + super(fieldName, shapeSupplier, indexedShapeId, indexedShapeType); } /** @@ -196,108 +141,36 @@ public GeoShapeQueryBuilder(String fieldName, String indexedShapeId) { */ @Deprecated public GeoShapeQueryBuilder(String fieldName, String indexedShapeId, String indexedShapeType) { - this(fieldName, (Geometry) null, indexedShapeId, indexedShapeType); - } - - private GeoShapeQueryBuilder(String fieldName, Geometry shape, String indexedShapeId, @Nullable String indexedShapeType) { - if (fieldName == null) { - throw new IllegalArgumentException("fieldName is required"); - } - if (shape == null && indexedShapeId == null) { - throw new IllegalArgumentException("either shape or indexedShapeId is required"); - } - this.fieldName = fieldName; - this.shape = shape; - this.indexedShapeId = indexedShapeId; - this.indexedShapeType = indexedShapeType; - this.supplier = null; - } - - private GeoShapeQueryBuilder(String fieldName, Supplier supplier, String indexedShapeId, - @Nullable String indexedShapeType) { - this.fieldName = fieldName; - this.shape = null; - this.supplier = supplier; - this.indexedShapeId = indexedShapeId; - this.indexedShapeType = indexedShapeType; + super(fieldName, indexedShapeId, indexedShapeType); } /** - * Read from a stream. 
+ * Creates a new GeoShapeQueryBuilder whose Query will be against the given + * field name and will use the Shape found with the given ID + * + * @param fieldName + * Name of the field that will be filtered + * @param indexedShapeId + * ID of the indexed Shape that will be used in the Query */ + public GeoShapeQueryBuilder(String fieldName, String indexedShapeId) { + super(fieldName, indexedShapeId); + } + public GeoShapeQueryBuilder(StreamInput in) throws IOException { super(in); - fieldName = in.readString(); - if (in.readBoolean()) { - shape = GeometryIO.readGeometry(in); - indexedShapeId = null; - indexedShapeType = null; - } else { - shape = null; - indexedShapeId = in.readOptionalString(); - indexedShapeType = in.readOptionalString(); - indexedShapeIndex = in.readOptionalString(); - indexedShapePath = in.readOptionalString(); - indexedShapeRouting = in.readOptionalString(); - } - relation = ShapeRelation.readFromStream(in); strategy = in.readOptionalWriteable(SpatialStrategy::readFromStream); - ignoreUnmapped = in.readBoolean(); - supplier = null; } @Override protected void doWriteTo(StreamOutput out) throws IOException { - if (supplier != null) { - throw new IllegalStateException("supplier must be null, can't serialize suppliers, missing a rewriteAndFetch?"); - } - out.writeString(fieldName); - boolean hasShape = shape != null; - out.writeBoolean(hasShape); - if (hasShape) { - GeometryIO.writeGeometry(out, shape);; - } else { - out.writeOptionalString(indexedShapeId); - out.writeOptionalString(indexedShapeType); - out.writeOptionalString(indexedShapeIndex); - out.writeOptionalString(indexedShapePath); - out.writeOptionalString(indexedShapeRouting); - } - relation.writeTo(out); + super.doWriteTo(out); out.writeOptionalWriteable(strategy); - out.writeBoolean(ignoreUnmapped); - } - - /** - * @return the name of the field that will be queried - */ - public String fieldName() { - return fieldName; - } - - /** - * @return the shape used in the Query - */ - public Geometry shape() { - return shape; - } - - /** - * @return the ID of the indexed Shape that will be used in the Query - */ - public String indexedShapeId() { - return indexedShapeId; } - /** - * @return the document type of the indexed Shape that will be used in the - * Query - * - * @deprecated Types are in the process of being removed. 
- */ - @Deprecated - public String indexedShapeType() { - return indexedShapeType; + @Override + public String getWriteableName() { + return NAME; } /** @@ -312,12 +185,11 @@ public String indexedShapeType() { public GeoShapeQueryBuilder strategy(SpatialStrategy strategy) { if (strategy != null && strategy == SpatialStrategy.TERM && relation != ShapeRelation.INTERSECTS) { throw new IllegalArgumentException("strategy [" + strategy.getStrategyName() + "] only supports relation [" - + ShapeRelation.INTERSECTS.getRelationName() + "] found relation [" + relation.getRelationName() + "]"); + + ShapeRelation.INTERSECTS.getRelationName() + "] found relation [" + relation.getRelationName() + "]"); } this.strategy = strategy; return this; } - /** * @return The spatial strategy to use for building the geo shape Query */ @@ -325,122 +197,39 @@ public SpatialStrategy strategy() { return strategy; } - /** - * Sets the name of the index where the indexed Shape can be found - * - * @param indexedShapeIndex Name of the index where the indexed Shape is - * @return this - */ - public GeoShapeQueryBuilder indexedShapeIndex(String indexedShapeIndex) { - this.indexedShapeIndex = indexedShapeIndex; - return this; - } - - /** - * @return the index name for the indexed Shape that will be used in the - * Query - */ - public String indexedShapeIndex() { - return indexedShapeIndex; - } - - /** - * Sets the path of the field in the indexed Shape document that has the Shape itself - * - * @param indexedShapePath Path of the field where the Shape itself is defined - * @return this - */ - public GeoShapeQueryBuilder indexedShapePath(String indexedShapePath) { - this.indexedShapePath = indexedShapePath; - return this; - } - - /** - * @return the path of the indexed Shape that will be used in the Query - */ - public String indexedShapePath() { - return indexedShapePath; - } - - /** - * Sets the optional routing to the indexed Shape that will be used in the query - * - * @param indexedShapeRouting indexed shape routing - * @return this - */ - public GeoShapeQueryBuilder indexedShapeRouting(String indexedShapeRouting) { - this.indexedShapeRouting = indexedShapeRouting; - return this; + @Override + protected List validContentTypes() { + return Arrays.asList(BaseGeoShapeFieldMapper.CONTENT_TYPE); } - - /** - * @return the optional routing to the indexed Shape that will be used in the - * Query - */ - public String indexedShapeRouting() { - return indexedShapeRouting; + @Override + public String queryFieldType() { + return BaseGeoShapeFieldMapper.CONTENT_TYPE; } - /** - * Sets the relation of query shape and indexed shape. 
- * - * @param relation relation of the shapes - * @return this - */ - public GeoShapeQueryBuilder relation(ShapeRelation relation) { - if (relation == null) { - throw new IllegalArgumentException("No Shape Relation defined"); - } - if (SpatialStrategy.TERM.equals(strategy) && relation != ShapeRelation.INTERSECTS) { - throw new IllegalArgumentException("current strategy [" + strategy.getStrategyName() + "] only supports relation [" - + ShapeRelation.INTERSECTS.getRelationName() + "] found relation [" + relation.getRelationName() + "]"); + @Override + public void doShapeQueryXContent(XContentBuilder builder, Params params) throws IOException { + if (strategy != null) { + builder.field(STRATEGY_FIELD.getPreferredName(), strategy.getStrategyName()); } - this.relation = relation; - return this; } - /** - * @return the relation of query shape and indexed shape to use in the Query - */ - public ShapeRelation relation() { - return relation; - } - - /** - * Sets whether the query builder should ignore unmapped fields (and run a - * {@link MatchNoDocsQuery} in place of this query) or throw an exception if - * the field is unmapped. - */ - public GeoShapeQueryBuilder ignoreUnmapped(boolean ignoreUnmapped) { - this.ignoreUnmapped = ignoreUnmapped; - return this; + @Override + protected GeoShapeQueryBuilder newShapeQueryBuilder(String fieldName, Geometry shape) { + return new GeoShapeQueryBuilder(fieldName, shape); } - /** - * Gets whether the query builder will ignore unmapped fields (and run a - * {@link MatchNoDocsQuery} in place of this query) or throw an exception if - * the field is unmapped. - */ - public boolean ignoreUnmapped() { - return ignoreUnmapped; + @Override + protected GeoShapeQueryBuilder newShapeQueryBuilder(String fieldName, Supplier shapeSupplier, + String indexedShapeId, String indexedShapeType) { + return new GeoShapeQueryBuilder(fieldName, shapeSupplier, indexedShapeId, indexedShapeType); } @Override - protected Query doToQuery(QueryShardContext context) { - if (shape == null || supplier != null) { - throw new UnsupportedOperationException("query must be rewritten first"); - } - final MappedFieldType fieldType = context.fieldMapper(fieldName); - if (fieldType == null) { - if (ignoreUnmapped) { - return new MatchNoDocsQuery(); - } else { - throw new QueryShardException(context, "failed to find geo_shape field [" + fieldName + "]"); - } - } else if (fieldType.typeName().equals(BaseGeoShapeFieldMapper.CONTENT_TYPE) == false) { + public Query buildShapeQuery(QueryShardContext context, MappedFieldType fieldType) { + if (fieldType.typeName().equals(BaseGeoShapeFieldMapper.CONTENT_TYPE) == false) { throw new QueryShardException(context, - "Field [" + fieldName + "] is not of type [geo_shape] but of type [" + fieldType.typeName() + "]"); + "Field [" + fieldName + "] is not of type [" + queryFieldType() + "] but of type [" + fieldType.typeName() + "]"); } final BaseGeoShapeFieldMapper.BaseGeoShapeFieldType ft = (BaseGeoShapeFieldMapper.BaseGeoShapeFieldType) fieldType; @@ -471,13 +260,41 @@ protected Query doToQuery(QueryShardContext context) { return query; } + public static SpatialArgs getArgs(Geometry shape, ShapeRelation relation) { + switch (relation) { + case DISJOINT: + return new SpatialArgs(SpatialOperation.IsDisjointTo, buildS4J(shape)); + case INTERSECTS: + return new SpatialArgs(SpatialOperation.Intersects, buildS4J(shape)); + case WITHIN: + return new SpatialArgs(SpatialOperation.IsWithin, buildS4J(shape)); + case CONTAINS: + return new 
SpatialArgs(SpatialOperation.Contains, buildS4J(shape)); + default: + throw new IllegalArgumentException("invalid relation [" + relation + "]"); + } + } + + /** + * Builds JTS shape from a geometry + * + * This method is needed to handle legacy indices and will be removed when we no longer need to build JTS shapes + */ + private static Shape buildS4J(Geometry geometry) { + return geometryToShapeBuilder(geometry).buildS4J(); + } + private Query getVectorQuery(QueryShardContext context, Geometry queryShape) { // CONTAINS queries are not yet supported by VECTOR strategy if (relation == ShapeRelation.CONTAINS) { throw new QueryShardException(context, ShapeRelation.CONTAINS + " query relation not supported for Field [" + fieldName + "]"); } + // wrap geoQuery as a ConstantScoreQuery + return getVectorQueryFromShape(context, queryShape); + } + protected Query getVectorQueryFromShape(QueryShardContext context, Geometry queryShape) { // TODO: Move this to QueryShardContext GeometryIndexer geometryIndexer = new GeometryIndexer(true); @@ -486,463 +303,268 @@ private Query getVectorQuery(QueryShardContext context, Geometry queryShape) { if (processedShape == null) { return new MatchNoDocsQuery(); } + return queryShape.visit(new ShapeVisitor(context)); + } - return processedShape.visit(new GeometryVisitor() { + public static ShapeBuilder geometryToShapeBuilder(Geometry geometry) { + ShapeBuilder shapeBuilder = geometry.visit(new GeometryVisitor<>() { @Override - public Query visit(Circle circle) { - throw new QueryShardException(context, "Field [" + fieldName + "] found and unknown shape Circle"); + public ShapeBuilder visit(Circle circle) { + throw new UnsupportedOperationException("circle is not supported"); } @Override - public Query visit(GeometryCollection collection) { - BooleanQuery.Builder bqb = new BooleanQuery.Builder(); - visit(bqb, collection); - return bqb.build(); - } - - private void visit(BooleanQuery.Builder bqb, GeometryCollection collection) { - for (Geometry shape : collection) { - if (shape instanceof MultiPoint) { - // Flatten multipoints - visit(bqb, (GeometryCollection) shape); - } else { - bqb.add(shape.visit(this), BooleanClause.Occur.SHOULD); - } + public ShapeBuilder visit(GeometryCollection collection) { + GeometryCollectionBuilder shapes = new GeometryCollectionBuilder(); + for (Geometry geometry : collection) { + shapes.shape(geometry.visit(this)); } + return shapes; } @Override - public Query visit(org.elasticsearch.geo.geometry.Line line) { - return LatLonShape.newLineQuery(fieldName(), relation.getLuceneRelation(), new Line(line.getLats(), line.getLons())); - } - - @Override - public Query visit(LinearRing ring) { - throw new QueryShardException(context, "Field [" + fieldName + "] found and unsupported shape LinearRing"); - } - - @Override - public Query visit(MultiLine multiLine) { - Line[] lines = new Line[multiLine.size()]; - for (int i=0; i visit(org.elasticsearch.geo.geometry.Line line) { + List coordinates = new ArrayList<>(); + for (int i = 0; i < line.length(); i++) { + coordinates.add(new Coordinate(line.getLon(i), line.getLat(i), line.getAlt(i))); } - return LatLonShape.newLineQuery(fieldName(), relation.getLuceneRelation(), lines); + return new LineStringBuilder(coordinates); } @Override - public Query visit(MultiPoint multiPoint) { - throw new QueryShardException(context, "Field [" + fieldName + "] does not support " + GeoShapeType.MULTIPOINT + - " queries"); + public ShapeBuilder visit(LinearRing ring) { + throw new UnsupportedOperationException("circle 
is not supported"); } @Override - public Query visit(MultiPolygon multiPolygon) { - Polygon[] polygons = new Polygon[multiPolygon.size()]; - for (int i=0; i visit(MultiLine multiLine) { + MultiLineStringBuilder lines = new MultiLineStringBuilder(); + for (int i = 0; i < multiLine.size(); i++) { + lines.linestring((LineStringBuilder) visit(multiLine.get(i))); } - return LatLonShape.newPolygonQuery(fieldName(), relation.getLuceneRelation(), polygons); + return lines; } @Override - public Query visit(Point point) { - return LatLonShape.newBoxQuery(fieldName, relation.getLuceneRelation(), - point.getLat(), point.getLat(), point.getLon(), point.getLon()); + public ShapeBuilder visit(MultiPoint multiPoint) { + List coordinates = new ArrayList<>(); + for (int i = 0; i < multiPoint.size(); i++) { + Point p = multiPoint.get(i); + coordinates.add(new Coordinate(p.getLon(), p.getLat(), p.getAlt())); + } + return new MultiPointBuilder(coordinates); } @Override - public Query visit(org.elasticsearch.geo.geometry.Polygon polygon) { - return LatLonShape.newPolygonQuery(fieldName(), relation.getLuceneRelation(), toLucenePolygon(polygon)); + public ShapeBuilder visit(MultiPolygon multiPolygon) { + MultiPolygonBuilder polygons = new MultiPolygonBuilder(); + for (int i = 0; i < multiPolygon.size(); i++) { + polygons.polygon((PolygonBuilder) visit(multiPolygon.get(i))); + } + return polygons; } @Override - public Query visit(org.elasticsearch.geo.geometry.Rectangle r) { - return LatLonShape.newBoxQuery(fieldName(), relation.getLuceneRelation(), - r.getMinLat(), r.getMaxLat(), r.getMinLon(), r.getMaxLon()); + public ShapeBuilder visit(Point point) { + return new PointBuilder(point.getLon(), point.getLat()); } - }); - } - - /** - * Fetches the Shape with the given ID in the given type and index. 
- * - * @param getRequest - * GetRequest containing index, type and id - * @param path - * Name or path of the field in the Shape Document where the - * Shape itself is located - */ - private void fetch(Client client, GetRequest getRequest, String path, ActionListener listener) { - getRequest.preference("_local"); - client.get(getRequest, new ActionListener(){ @Override - public void onResponse(GetResponse response) { - try { - if (!response.isExists()) { - throw new IllegalArgumentException("Shape with ID [" + getRequest.id() + "] in type [" + getRequest.type() - + "] not found"); - } - if (response.isSourceEmpty()) { - throw new IllegalArgumentException("Shape with ID [" + getRequest.id() + "] in type [" + getRequest.type() + - "] source disabled"); - } - - String[] pathElements = path.split("\\."); - int currentPathSlot = 0; - - // It is safe to use EMPTY here because this never uses namedObject - try (XContentParser parser = XContentHelper - .createParser(NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, response.getSourceAsBytesRef())) { - XContentParser.Token currentToken; - while ((currentToken = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (currentToken == XContentParser.Token.FIELD_NAME) { - if (pathElements[currentPathSlot].equals(parser.currentName())) { - parser.nextToken(); - if (++currentPathSlot == pathElements.length) { - listener.onResponse(new GeometryParser(true, true, true).parse(parser)); - return; - } - } else { - parser.nextToken(); - parser.skipChildren(); - } - } - } - throw new IllegalStateException("Shape with name [" + getRequest.id() + "] found but missing " + path + " field"); - } - } catch (Exception e) { - onFailure(e); + public ShapeBuilder visit(org.elasticsearch.geo.geometry.Polygon polygon) { + PolygonBuilder polygonBuilder = + new PolygonBuilder((LineStringBuilder) visit((org.elasticsearch.geo.geometry.Line) polygon.getPolygon()), + ShapeBuilder.Orientation.RIGHT, false); + for (int i = 0; i < polygon.getNumberOfHoles(); i++) { + polygonBuilder.hole((LineStringBuilder) visit((org.elasticsearch.geo.geometry.Line) polygon.getHole(i))); } + return polygonBuilder; } @Override - public void onFailure(Exception e) { - listener.onFailure(e); + public ShapeBuilder visit(Rectangle rectangle) { + return new EnvelopeBuilder(new Coordinate(rectangle.getMinLon(), rectangle.getMaxLat()), + new Coordinate(rectangle.getMaxLon(), rectangle.getMinLat())); } }); - - } - - public static SpatialArgs getArgs(Geometry shape, ShapeRelation relation) { - switch (relation) { - case DISJOINT: - return new SpatialArgs(SpatialOperation.IsDisjointTo, buildS4J(shape)); - case INTERSECTS: - return new SpatialArgs(SpatialOperation.Intersects, buildS4J(shape)); - case WITHIN: - return new SpatialArgs(SpatialOperation.IsWithin, buildS4J(shape)); - case CONTAINS: - return new SpatialArgs(SpatialOperation.Contains, buildS4J(shape)); - default: - throw new IllegalArgumentException("invalid relation [" + relation + "]"); - } + return shapeBuilder; } - @Override - protected void doXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(NAME); - - builder.startObject(fieldName); + private class ShapeVisitor implements GeometryVisitor { + QueryShardContext context; + MappedFieldType fieldType; - if (strategy != null) { - builder.field(STRATEGY_FIELD.getPreferredName(), strategy.getStrategyName()); + ShapeVisitor(QueryShardContext context) { + this.context = context; + this.fieldType = context.fieldMapper(fieldName); } - if 
(shape != null) { - builder.field(SHAPE_FIELD.getPreferredName()); - GeoJson.toXContent(shape, builder,params); - } else { - builder.startObject(INDEXED_SHAPE_FIELD.getPreferredName()) - .field(SHAPE_ID_FIELD.getPreferredName(), indexedShapeId); - if (indexedShapeType != null) { - builder.field(SHAPE_TYPE_FIELD.getPreferredName(), indexedShapeType); - } - if (indexedShapeIndex != null) { - builder.field(SHAPE_INDEX_FIELD.getPreferredName(), indexedShapeIndex); - } - if (indexedShapePath != null) { - builder.field(SHAPE_PATH_FIELD.getPreferredName(), indexedShapePath); - } - if (indexedShapeRouting != null) { - builder.field(SHAPE_ROUTING_FIELD.getPreferredName(), indexedShapeRouting); - } - builder.endObject(); + @Override + public Query visit(Circle circle) { + throw new QueryShardException(context, "Field [" + fieldName + "] found and unknown shape Circle"); } - if(relation != null) { - builder.field(RELATION_FIELD.getPreferredName(), relation.getRelationName()); + @Override + public Query visit(GeometryCollection collection) { + BooleanQuery.Builder bqb = new BooleanQuery.Builder(); + visit(bqb, collection); + return bqb.build(); } - builder.endObject(); - builder.field(IGNORE_UNMAPPED_FIELD.getPreferredName(), ignoreUnmapped); - - printBoostAndQueryName(builder); - - builder.endObject(); - } - - public static GeoShapeQueryBuilder fromXContent(XContentParser parser) throws IOException { - String fieldName = null; - ShapeRelation shapeRelation = null; - SpatialStrategy strategy = null; - ShapeBuilder shape = null; - - String id = null; - String type = null; - String index = null; - String shapePath = null; - String shapeRouting = null; - - XContentParser.Token token; - String currentFieldName = null; - float boost = AbstractQueryBuilder.DEFAULT_BOOST; - String queryName = null; - boolean ignoreUnmapped = DEFAULT_IGNORE_UNMAPPED; - - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - if (fieldName != null) { - throw new ParsingException(parser.getTokenLocation(), "[" + - GeoShapeQueryBuilder.NAME + "] point specified twice. 
[" + currentFieldName + "]"); - } - fieldName = currentFieldName; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - token = parser.nextToken(); - if (SHAPE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - shape = ShapeParser.parse(parser); - } else if (STRATEGY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - String strategyName = parser.text(); - strategy = SpatialStrategy.fromString(strategyName); - if (strategy == null) { - throw new ParsingException(parser.getTokenLocation(), "Unknown strategy [" + strategyName + " ]"); - } - } else if (RELATION_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - shapeRelation = ShapeRelation.getRelationByName(parser.text()); - if (shapeRelation == null) { - throw new ParsingException(parser.getTokenLocation(), "Unknown shape operation [" + parser.text() + " ]"); - } - } else if (INDEXED_SHAPE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token.isValue()) { - if (SHAPE_ID_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - id = parser.text(); - } else if (SHAPE_TYPE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - type = parser.text(); - } else if (SHAPE_INDEX_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - index = parser.text(); - } else if (SHAPE_PATH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - shapePath = parser.text(); - } else if (SHAPE_ROUTING_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - shapeRouting = parser.text(); - } - } else { - throw new ParsingException(parser.getTokenLocation(), "[" + GeoShapeQueryBuilder.NAME + - "] unknown token [" + token + "] after [" + currentFieldName + "]"); - } - } - } else { - throw new ParsingException(parser.getTokenLocation(), "[" + GeoShapeQueryBuilder.NAME + - "] query does not support [" + currentFieldName + "]"); - } - } - } - } else if (token.isValue()) { - if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - boost = parser.floatValue(); - } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - queryName = parser.text(); - } else if (IGNORE_UNMAPPED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - ignoreUnmapped = parser.booleanValue(); + private void visit(BooleanQuery.Builder bqb, GeometryCollection collection) { + for (Geometry shape : collection) { + if (shape instanceof MultiPoint) { + // Flatten multipoints + visit(bqb, (GeometryCollection) shape); } else { - throw new ParsingException(parser.getTokenLocation(), "[" + GeoShapeQueryBuilder.NAME + - "] query does not support [" + currentFieldName + "]"); + bqb.add(shape.visit(this), BooleanClause.Occur.SHOULD); } } } - GeoShapeQueryBuilder builder; - if (type != null) { - deprecationLogger.deprecatedAndMaybeLog( - "geo_share_query_with_types", TYPES_DEPRECATION_MESSAGE); + + @Override + public Query visit(org.elasticsearch.geo.geometry.Line line) { + validateIsGeoShapeFieldType(); + return LatLonShape.newLineQuery(fieldName(), relation.getLuceneRelation(), new Line(line.getLats(), line.getLons())); } - if (shape != null) { - builder = new GeoShapeQueryBuilder(fieldName, shape); - } 
else { - builder = new GeoShapeQueryBuilder(fieldName, id, type); + @Override + public Query visit(LinearRing ring) { + throw new QueryShardException(context, "Field [" + fieldName + "] found and unsupported shape LinearRing"); } - if (index != null) { - builder.indexedShapeIndex(index); + + @Override + public Query visit(MultiLine multiLine) { + validateIsGeoShapeFieldType(); + Line[] lines = new Line[multiLine.size()]; + for (int i=0; i supplier = new SetOnce<>(); - queryRewriteContext.registerAsyncAction((client, listener) -> { - GetRequest getRequest; - if (indexedShapeType == null) { - getRequest = new GetRequest(indexedShapeIndex, indexedShapeId); + private static class ParsedGeoShapeQueryParams extends ParsedShapeQueryParams { + SpatialStrategy strategy; + + @Override + protected boolean parseXContentField(XContentParser parser) throws IOException { + SpatialStrategy strategy; + if (SHAPE_FIELD.match(parser.currentName(), parser.getDeprecationHandler())) { + this.shape = ShapeParser.parse(parser); + return true; + } else if (STRATEGY_FIELD.match(parser.currentName(), parser.getDeprecationHandler())) { + String strategyName = parser.text(); + strategy = SpatialStrategy.fromString(strategyName); + if (strategy == null) { + throw new ParsingException(parser.getTokenLocation(), "Unknown strategy [" + strategyName + " ]"); } else { - getRequest = new GetRequest(indexedShapeIndex, indexedShapeType, indexedShapeId); + this.strategy = strategy; } - getRequest.routing(indexedShapeRouting); - fetch(client, getRequest, indexedShapePath, ActionListener.wrap(builder-> { - supplier.set(builder); - listener.onResponse(null); - }, listener::onFailure)); - }); - return new GeoShapeQueryBuilder(this.fieldName, supplier::get, this.indexedShapeId, this.indexedShapeType).relation(relation) - .strategy(strategy); + return true; + } + return false; } - return this; - } - - /** - * Builds JTS shape from a geometry - * - * This method is needed to handle legacy indices and will be removed when we no longer need to build JTS shapes - */ - private static Shape buildS4J(Geometry geometry) { - return geometryToShapeBuilder(geometry).buildS4J(); } - public static ShapeBuilder geometryToShapeBuilder(Geometry geometry) { - ShapeBuilder shapeBuilder = geometry.visit(new GeometryVisitor<>() { - @Override - public ShapeBuilder visit(Circle circle) { - throw new UnsupportedOperationException("circle is not supported"); - } + public static GeoShapeQueryBuilder fromXContent(XContentParser parser) throws IOException { + ParsedGeoShapeQueryParams pgsqp = + (ParsedGeoShapeQueryParams) AbstractGeometryQueryBuilder.parsedParamsFromXContent(parser, new ParsedGeoShapeQueryParams()); - @Override - public ShapeBuilder visit(GeometryCollection collection) { - GeometryCollectionBuilder shapes = new GeometryCollectionBuilder(); - for (Geometry geometry : collection) { - shapes.shape(geometry.visit(this)); - } - return shapes; - } + GeoShapeQueryBuilder builder; + if (pgsqp.type != null) { + deprecationLogger.deprecatedAndMaybeLog("geo_share_query_with_types", TYPES_DEPRECATION_MESSAGE); + } - @Override - public ShapeBuilder visit(org.elasticsearch.geo.geometry.Line line) { - List coordinates = new ArrayList<>(); - for (int i = 0; i < line.length(); i++) { - coordinates.add(new Coordinate(line.getLon(i), line.getLat(i), line.getAlt(i))); - } - return new LineStringBuilder(coordinates); - } + if (pgsqp.shape != null) { + builder = new GeoShapeQueryBuilder(pgsqp.fieldName, pgsqp.shape); + } else { + builder = new 
GeoShapeQueryBuilder(pgsqp.fieldName, pgsqp.id, pgsqp.type); + } - @Override - public ShapeBuilder visit(LinearRing ring) { - throw new UnsupportedOperationException("circle is not supported"); - } + if (pgsqp.index != null) { + builder.indexedShapeIndex(pgsqp.index); + } - @Override - public ShapeBuilder visit(MultiLine multiLine) { - MultiLineStringBuilder lines = new MultiLineStringBuilder(); - for (int i = 0; i < multiLine.size(); i++) { - lines.linestring((LineStringBuilder) visit(multiLine.get(i))); - } - return lines; - } + if (pgsqp.shapePath != null) { + builder.indexedShapePath(pgsqp.shapePath); + } - @Override - public ShapeBuilder visit(MultiPoint multiPoint) { - List coordinates = new ArrayList<>(); - for (int i = 0; i < multiPoint.size(); i++) { - Point p = multiPoint.get(i); - coordinates.add(new Coordinate(p.getLon(), p.getLat(), p.getAlt())); - } - return new MultiPointBuilder(coordinates); - } + if (pgsqp.shapeRouting != null) { + builder.indexedShapeRouting(pgsqp.shapeRouting); + } - @Override - public ShapeBuilder visit(MultiPolygon multiPolygon) { - MultiPolygonBuilder polygons = new MultiPolygonBuilder(); - for (int i = 0; i < multiPolygon.size(); i++) { - polygons.polygon((PolygonBuilder) visit(multiPolygon.get(i))); - } - return polygons; - } + if (pgsqp.relation != null) { + builder.relation(pgsqp.relation); + } - @Override - public ShapeBuilder visit(Point point) { - return new PointBuilder(point.getLon(), point.getLat()); - } + if (pgsqp.strategy != null) { + builder.strategy(pgsqp.strategy); + } - @Override - public ShapeBuilder visit(org.elasticsearch.geo.geometry.Polygon polygon) { - PolygonBuilder polygonBuilder = - new PolygonBuilder((LineStringBuilder) visit((org.elasticsearch.geo.geometry.Line) polygon.getPolygon()), - ShapeBuilder.Orientation.RIGHT, false); - for (int i = 0; i < polygon.getNumberOfHoles(); i++) { - polygonBuilder.hole((LineStringBuilder) visit((org.elasticsearch.geo.geometry.Line) polygon.getHole(i))); - } - return polygonBuilder; - } + if (pgsqp.queryName != null) { + builder.queryName(pgsqp.queryName); + } - @Override - public ShapeBuilder visit(Rectangle rectangle) { - return new EnvelopeBuilder(new Coordinate(rectangle.getMinLon(), rectangle.getMaxLat()), - new Coordinate(rectangle.getMaxLon(), rectangle.getMinLat())); - } - }); - return shapeBuilder; + builder.boost(pgsqp.boost); + builder.ignoreUnmapped(pgsqp.ignoreUnmapped); + return builder; } } From 8ed39438dfa476df95513a93d1151a4b949f0e98 Mon Sep 17 00:00:00 2001 From: Deb Adair Date: Wed, 24 Jul 2019 14:23:30 -0700 Subject: [PATCH 10/51] [DOCS] Adding x version of the intro blurb for the TOC. --- docs/reference/index.x-docinfo.xml | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 docs/reference/index.x-docinfo.xml diff --git a/docs/reference/index.x-docinfo.xml b/docs/reference/index.x-docinfo.xml new file mode 100644 index 0000000000000..e681b78c45596 --- /dev/null +++ b/docs/reference/index.x-docinfo.xml @@ -0,0 +1,13 @@ + + + Welcome to the official documentation for Elasticsearch: + the search and analytics engine that powers the Elastic Stack. + If you want to learn how to use Elasticsearch to search and analyze your + data, you've come to the right place. This guide shows you how to: + + + Install, configure, and administer an Elasticsearch cluster. + Index your data, optimize your indices, and search with the Elasticsearch query language. + Discover trends, patterns, and anomalies with aggregations and the machine learning APIs. 
+ + From f1bdc0c03e690aba06145e70d1d16722b2085d6b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Przemys=C5=82aw=20Witek?= Date: Thu, 25 Jul 2019 08:04:36 +0200 Subject: [PATCH 11/51] Add result_type field to TimingStats and DatafeedTimingStats documents (#44812) --- .../core/ml/datafeed/DatafeedTimingStats.java | 6 +++- .../process/autodetect/state/TimingStats.java | 4 +++ .../job/persistence/JobResultsPersister.java | 31 +++++++++++++++---- .../persistence/JobResultsPersisterTests.java | 2 ++ 4 files changed, 36 insertions(+), 7 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedTimingStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedTimingStats.java index 775dc9931bc86..4e2d51b2ebacd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedTimingStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedTimingStats.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.job.results.Result; import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import java.io.IOException; @@ -38,7 +39,7 @@ public class DatafeedTimingStats implements ToXContentObject, Writeable { private static ConstructingObjectParser createParser() { ConstructingObjectParser parser = new ConstructingObjectParser<>( - "datafeed_timing_stats", + TYPE.getPreferredName(), true, args -> { String jobId = (String) args[0]; @@ -128,6 +129,9 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { builder.startObject(); + if (params.paramAsBoolean(ToXContentParams.FOR_INTERNAL_STORAGE, false)) { + builder.field(Result.RESULT_TYPE.getPreferredName(), TYPE.getPreferredName()); + } builder.field(JOB_ID.getPreferredName(), jobId); builder.field(SEARCH_COUNT.getPreferredName(), searchCount); builder.field(BUCKET_COUNT.getPreferredName(), bucketCount); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/TimingStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/TimingStats.java index b526d614df3ab..a99260e668685 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/TimingStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/TimingStats.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.results.Result; import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import java.io.IOException; @@ -195,6 +196,9 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); + if (params.paramAsBoolean(ToXContentParams.FOR_INTERNAL_STORAGE, false)) { + builder.field(Result.RESULT_TYPE.getPreferredName(), TYPE.getPreferredName()); + } builder.field(Job.ID.getPreferredName(), jobId); builder.field(BUCKET_COUNT.getPreferredName(), bucketCount); if 
(params.paramAsBoolean(ToXContentParams.INCLUDE_CALCULATED_FIELDS, false)) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersister.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersister.java index 1d960c5741836..783706259a17b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersister.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersister.java @@ -38,6 +38,7 @@ import org.elasticsearch.xpack.core.ml.job.results.ForecastRequestStats; import org.elasticsearch.xpack.core.ml.job.results.Influencer; import org.elasticsearch.xpack.core.ml.job.results.ModelPlot; +import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import java.io.IOException; import java.util.Collections; @@ -130,7 +131,11 @@ private void persistBucketInfluencersStandalone(String jobId, List persist(String indexName) { void persist(String indexName, ActionListener listener) { logCall(indexName); - try (XContentBuilder content = toXContentBuilder(object)) { + try (XContentBuilder content = toXContentBuilder(object, params)) { IndexRequest indexRequest = new IndexRequest(indexName).id(id).source(content).setRefreshPolicy(refreshPolicy); executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, indexRequest, listener, client::index); } catch (IOException e) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java index da69ef3760a6f..94017ef266f2c 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java @@ -221,6 +221,7 @@ public void testPersistTimingStats() { indexRequest.sourceAsMap(), equalTo( Map.of( + "result_type", "timing_stats", "job_id", "foo", "bucket_count", 7, "minimum_bucket_processing_time_ms", 1.0, @@ -259,6 +260,7 @@ public void testPersistDatafeedTimingStats() { indexRequest.sourceAsMap(), equalTo( Map.of( + "result_type", "datafeed_timing_stats", "job_id", "foo", "search_count", 6, "bucket_count", 66, From 5b9ccd72e9a3bb65c8b7b06979a75cb795c17111 Mon Sep 17 00:00:00 2001 From: Andrei Stefan Date: Thu, 25 Jul 2019 09:54:04 +0300 Subject: [PATCH 12/51] SQL: [Tests] Re-enable testDriverConfigurationWithSSLInURL test with more logging (#44800) --- .../xpack/sql/jdbc/JdbcConfigurationTests.java | 18 ++++++++++++++---- .../src/test/resources/plugin-security.policy | 2 ++ 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationTests.java index acaf6917862c6..a8495fbf57117 100644 --- a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationTests.java +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationTests.java @@ -5,11 +5,15 @@ */ package org.elasticsearch.xpack.sql.jdbc; +import org.elasticsearch.SpecialPermission; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.client.SslConfig; +import org.elasticsearch.xpack.sql.client.SuppressForbidden; import java.net.URI; import 
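Patch 12 works around the previously muted test by granting the JDBC "setLog" permission and switching driver logging on inside a privileged block. A minimal standalone sketch of that sequence (plain Java, not part of the patch, assuming a SecurityManager may be installed):

import java.io.PrintWriter;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.sql.DriverManager;

public class EnableJdbcDriverLogging {
    public static void main(String[] args) {
        // Needs java.sql.SQLPermission "setLog" when a SecurityManager is active,
        // which is what the plugin-security.policy change below grants.
        AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
            DriverManager.setLogWriter(new PrintWriter(System.out));
            return null;
        });
        System.out.println("JDBC driver logging now goes to System.out");
    }
}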
java.net.URISyntaxException; +import java.security.AccessController; +import java.security.PrivilegedAction; import java.sql.DriverManager; import java.sql.SQLException; import java.util.HashMap; @@ -252,14 +256,20 @@ public void testSSLPropertiesOverride() throws Exception { assertSslConfig(props, JdbcConfiguration.create("jdbc:es://test?" + sslUrlProps.toString(), props, 0).sslConfig()); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/41557") + @SuppressForbidden(reason = "JDBC drivers allows logging to Sys.out") public void testDriverConfigurationWithSSLInURL() { Map urlPropMap = sslProperties(); - - Properties allProps = new Properties(); - allProps.putAll(urlPropMap); String sslUrlProps = urlPropMap.entrySet().stream().map(e -> e.getKey() + "=" + e.getValue()).collect(Collectors.joining("&")); + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(new SpecialPermission()); + } + AccessController.doPrivileged((PrivilegedAction) () -> { + DriverManager.setLogWriter(new java.io.PrintWriter(System.out)); + return null; + }); + try { DriverManager.getDriver("jdbc:es://test?" + sslUrlProps); } catch (SQLException sqle) { diff --git a/x-pack/plugin/sql/jdbc/src/test/resources/plugin-security.policy b/x-pack/plugin/sql/jdbc/src/test/resources/plugin-security.policy index 5f16c1579b0be..577795ffb7842 100644 --- a/x-pack/plugin/sql/jdbc/src/test/resources/plugin-security.policy +++ b/x-pack/plugin/sql/jdbc/src/test/resources/plugin-security.policy @@ -1,4 +1,6 @@ grant { // Required for testing the Driver registration permission java.sql.SQLPermission "deregisterDriver"; + // Required for debug logging purposes + permission java.sql.SQLPermission "setLog"; }; From 06dea859e8fddada868941aaae15e83b4f64babe Mon Sep 17 00:00:00 2001 From: Andrei Stefan Date: Thu, 25 Jul 2019 10:26:38 +0300 Subject: [PATCH 13/51] SQL: fix URI path being lost in case of hosted ES scenario (#44776) --- .../sql/client/JreHttpUrlConnection.java | 3 +- .../xpack/sql/client/UriUtils.java | 29 ++++++++++ .../xpack/sql/client/UriUtilsTests.java | 53 +++++++++++++++++++ 3 files changed, 84 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/JreHttpUrlConnection.java b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/JreHttpUrlConnection.java index 716b1bb058a53..1e3f2d95ae2c1 100644 --- a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/JreHttpUrlConnection.java +++ b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/JreHttpUrlConnection.java @@ -35,6 +35,7 @@ import javax.sql.rowset.serial.SerialException; import static java.util.Collections.emptyMap; +import static org.elasticsearch.xpack.sql.client.UriUtils.appendSegmentToPath; import static org.elasticsearch.xpack.sql.proto.Protocol.SQL_QUERY_REST_ENDPOINT; /** @@ -52,7 +53,7 @@ public class JreHttpUrlConnection implements Closeable { + "?error_trace] and method [POST], allowed:"; public static R http(String path, String query, ConnectionConfiguration cfg, Function handler) { - final URI uriPath = cfg.baseUri().resolve(path); // update path if needed + final URI uriPath = appendSegmentToPath(cfg.baseUri(), path); // update path if needed final String uriQuery = query == null ? 
uriPath.getQuery() : query; // update query if needed final URL url; try { diff --git a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/UriUtils.java b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/UriUtils.java index 26113010c61da..4f07e15df87fb 100644 --- a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/UriUtils.java +++ b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/UriUtils.java @@ -76,4 +76,33 @@ public static URI removeQuery(URI uri, String connectionString, URI defaultURI) throw new IllegalArgumentException("Invalid connection configuration [" + connectionString + "]: " + e.getMessage(), e); } } + + public static URI appendSegmentToPath(URI uri, String segment) { + if (uri == null) { + throw new IllegalArgumentException("URI must not be null"); + } + if (segment == null || segment.isEmpty() || "/".equals(segment)) { + return uri; + } + + String path = uri.getPath(); + String concatenatedPath = ""; + String cleanSegment = segment.startsWith("/") ? segment.substring(1) : segment; + + if (path == null || path.isEmpty()) { + path = "/"; + } + + if (path.charAt(path.length() - 1) == '/') { + concatenatedPath = path + cleanSegment; + } else { + concatenatedPath = path + "/" + cleanSegment; + } + try { + return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), concatenatedPath, + uri.getQuery(), uri.getFragment()); + } catch (URISyntaxException e) { + throw new IllegalArgumentException("Invalid segment [" + segment + "] for URI [" + uri + "]: " + e.getMessage(), e); + } + } } diff --git a/x-pack/plugin/sql/sql-client/src/test/java/org/elasticsearch/xpack/sql/client/UriUtilsTests.java b/x-pack/plugin/sql/sql-client/src/test/java/org/elasticsearch/xpack/sql/client/UriUtilsTests.java index 0b7f6c47b0df0..c710a7574e41e 100644 --- a/x-pack/plugin/sql/sql-client/src/test/java/org/elasticsearch/xpack/sql/client/UriUtilsTests.java +++ b/x-pack/plugin/sql/sql-client/src/test/java/org/elasticsearch/xpack/sql/client/UriUtilsTests.java @@ -9,6 +9,7 @@ import java.net.URI; +import static org.elasticsearch.xpack.sql.client.UriUtils.appendSegmentToPath; import static org.elasticsearch.xpack.sql.client.UriUtils.parseURI; import static org.elasticsearch.xpack.sql.client.UriUtils.removeQuery; @@ -84,4 +85,56 @@ public void testRemoveQueryNoQuery() throws Exception { assertEquals(URI.create("http://server:9100"), removeQuery(URI.create("http://server:9100"), "http://server:9100", DEFAULT_URI)); } + + public void testAppendEmptySegmentToPath() throws Exception { + assertEquals(URI.create("http://server:9100"), + appendSegmentToPath(URI.create("http://server:9100"), "")); + } + + public void testAppendNullSegmentToPath() throws Exception { + assertEquals(URI.create("http://server:9100"), + appendSegmentToPath(URI.create("http://server:9100"), null)); + } + + public void testAppendSegmentToNullPath() throws Exception { + assertEquals( + "URI must not be null", + expectThrows(IllegalArgumentException.class, () -> appendSegmentToPath(null, "/_sql")).getMessage() + ); + } + + public void testAppendSegmentToEmptyPath() throws Exception { + assertEquals(URI.create("/_sql"), + appendSegmentToPath(URI.create(""), "/_sql")); + } + + public void testAppendSlashSegmentToPath() throws Exception { + assertEquals(URI.create("http://server:9100"), + appendSegmentToPath(URI.create("http://server:9100"), "/")); + } + + public void testAppendSqlSegmentToPath() throws Exception { + 
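For context on why patch 13 swaps URI.resolve() for the new appendSegmentToPath(): resolve() treats a segment with a leading slash as an absolute path and drops whatever base path the URL already had, which is precisely the hosted-ES (proxy prefix) scenario the commit message describes. A standalone sketch of the difference, independent of the Elasticsearch classes above:

import java.net.URI;

public class ResolveVsAppend {
    public static void main(String[] args) {
        URI base = URI.create("http://server:9100/es_rest");
        // URI.resolve() replaces the entire path for absolute segments,
        // silently losing the "/es_rest" prefix:
        System.out.println(base.resolve("/_sql"));       // http://server:9100/_sql
        // The behaviour the new helper preserves, shown here with plain
        // string concatenation for brevity:
        System.out.println(URI.create(base + "/_sql"));  // http://server:9100/es_rest/_sql
    }
}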
assertEquals(URI.create("http://server:9100/_sql"), + appendSegmentToPath(URI.create("http://server:9100"), "/_sql")); + } + + public void testAppendSqlSegmentNoSlashToPath() throws Exception { + assertEquals(URI.create("http://server:9100/_sql"), + appendSegmentToPath(URI.create("http://server:9100"), "_sql")); + } + + public void testAppendSegmentToPath() throws Exception { + assertEquals(URI.create("http://server:9100/es_rest/_sql"), + appendSegmentToPath(URI.create("http://server:9100/es_rest"), "/_sql")); + } + + public void testAppendSegmentNoSlashToPath() throws Exception { + assertEquals(URI.create("http://server:9100/es_rest/_sql"), + appendSegmentToPath(URI.create("http://server:9100/es_rest"), "_sql")); + } + + public void testAppendSegmentTwoSlashesToPath() throws Exception { + assertEquals(URI.create("https://server:9100/es_rest/_sql"), + appendSegmentToPath(URI.create("https://server:9100/es_rest/"), "/_sql")); + } } From c8974aabf62e94774e5816981787ddd19287d6f8 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Thu, 25 Jul 2019 11:44:03 +0100 Subject: [PATCH 14/51] [ML-DataFrame] Remove ID field from data frame indexer stats (#44768) This is a followup to #44350. The indexer stats used to be persisted standalone, but now are only persisted as part of a state-and-stats document. During the review of #44350 it was decided that we'll stick with this design, so there will never be a need for an indexer stats object to store its transform ID as it is stored on the enclosing document. This PR removes the indexer stats document ID. --- .../DataFrameIndexerTransformStatsTests.java | 8 +-- .../hlrc/DataFrameTransformStatsTests.java | 7 +- .../DataFrameIndexerTransformStats.java | 71 +++++-------------- .../transforms/DataFrameTransformStats.java | 2 +- .../DataFrameIndexerTransformStatsTests.java | 26 ++----- .../DataFrameTransformStatsTests.java | 2 +- .../DataFrameTransformStoredDocTests.java | 2 +- .../DataFrameInfoTransportAction.java | 4 +- .../DataFrameUsageTransportAction.java | 5 +- ...nsportPreviewDataFrameTransformAction.java | 2 +- .../transforms/DataFrameTransformTask.java | 6 +- .../DataFrameInfoTransportActionTests.java | 4 +- .../transforms/DataFrameIndexerTests.java | 2 +- .../pivot/AggregationResultUtilsTests.java | 4 +- 14 files changed, 46 insertions(+), 99 deletions(-) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameIndexerTransformStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameIndexerTransformStatsTests.java index a2626d357b499..e42aaa97b2258 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameIndexerTransformStatsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameIndexerTransformStatsTests.java @@ -31,7 +31,7 @@ public class DataFrameIndexerTransformStatsTests extends AbstractHlrcXContentTes public static DataFrameIndexerTransformStats fromHlrc( org.elasticsearch.client.dataframe.transforms.DataFrameIndexerTransformStats instance) { - return DataFrameIndexerTransformStats.withDefaultTransformId(instance.getNumPages(), instance.getNumDocuments(), + return new DataFrameIndexerTransformStats(instance.getNumPages(), instance.getNumDocuments(), instance.getOutputDocuments(), instance.getNumInvocations(), instance.getIndexTime(), instance.getSearchTime(), instance.getIndexTotal(), instance.getSearchTotal(), 
instance.getIndexFailures(), instance.getSearchFailures()); } @@ -48,8 +48,8 @@ public DataFrameIndexerTransformStats convertHlrcToInternal( return fromHlrc(instance); } - public static DataFrameIndexerTransformStats randomStats(String transformId) { - return new DataFrameIndexerTransformStats(transformId, randomLongBetween(10L, 10000L), + public static DataFrameIndexerTransformStats randomStats() { + return new DataFrameIndexerTransformStats(randomLongBetween(10L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L)); @@ -57,7 +57,7 @@ public static DataFrameIndexerTransformStats randomStats(String transformId) { @Override protected DataFrameIndexerTransformStats createTestInstance() { - return randomStats(DataFrameIndexerTransformStats.DEFAULT_TRANSFORM_ID); + return randomStats(); } @Override diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStatsTests.java index 19ddb8be1abbf..e5dd37fcd9d5d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStatsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStatsTests.java @@ -79,8 +79,7 @@ public static DataFrameTransformStats randomDataFrameTransformStats() { randomFrom(DataFrameTransformTaskState.values()), randomBoolean() ? null : randomAlphaOfLength(100), randomBoolean() ? 
null : randomNodeAttributes(), - // TODO: remove this ID field from the server side as it's no longer needed - randomStats("_all"), + randomStats(), DataFrameTransformCheckpointingInfoTests.randomDataFrameTransformCheckpointingInfo()); } @@ -132,8 +131,8 @@ public static NodeAttributes randomNodeAttributes() { attributes); } - public static DataFrameIndexerTransformStats randomStats(String transformId) { - return new DataFrameIndexerTransformStats(transformId, randomLongBetween(10L, 10000L), + public static DataFrameIndexerTransformStats randomStats() { + return new DataFrameIndexerTransformStats(randomLongBetween(10L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L)); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerTransformStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerTransformStats.java index 2926a88768625..1fc8c2b826350 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerTransformStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerTransformStats.java @@ -6,24 +6,23 @@ package org.elasticsearch.xpack.core.dataframe.transforms; -import org.elasticsearch.common.Nullable; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.indexing.IndexerJobStats; import java.io.IOException; import java.util.Objects; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; public class DataFrameIndexerTransformStats extends IndexerJobStats { - public static final String DEFAULT_TRANSFORM_ID = "_all"; + + private static final String DEFAULT_TRANSFORM_ID = "_all"; // TODO remove when no longer needed for wire BWC public static final String NAME = "data_frame_indexer_transform_stats"; public static ParseField NUM_PAGES = new ParseField("pages_processed"); @@ -39,12 +38,11 @@ public class DataFrameIndexerTransformStats extends IndexerJobStats { private static final ConstructingObjectParser LENIENT_PARSER = new ConstructingObjectParser<>( NAME, true, - args -> new DataFrameIndexerTransformStats(args[0] != null ? 
(String) args[0] : DEFAULT_TRANSFORM_ID, - (long) args[1], (long) args[2], (long) args[3], (long) args[4], (long) args[5], (long) args[6], (long) args[7], - (long) args[8], (long) args[9], (long) args[10])); + args -> new DataFrameIndexerTransformStats( + (long) args[0], (long) args[1], (long) args[2], (long) args[3], (long) args[4], (long) args[5], (long) args[6], + (long) args[7], (long) args[8], (long) args[9])); static { - LENIENT_PARSER.declareString(optionalConstructorArg(), DataFrameField.ID); LENIENT_PARSER.declareLong(constructorArg(), NUM_PAGES); LENIENT_PARSER.declareLong(constructorArg(), NUM_INPUT_DOCUMENTS); LENIENT_PARSER.declareLong(constructorArg(), NUM_OUTPUT_DOCUMENTS); @@ -57,60 +55,38 @@ public class DataFrameIndexerTransformStats extends IndexerJobStats { LENIENT_PARSER.declareLong(constructorArg(), SEARCH_FAILURES); } - private final String transformId; - /** - * Certain situations call for a default transform ID, e.g. when merging many different transforms for statistics gather. - * - * The returned stats object cannot be stored in the index as the transformId does not refer to a real transform configuration - * - * @return new DataFrameIndexerTransformStats with empty stats and a default transform ID + * Create with all stats set to zero */ - public static DataFrameIndexerTransformStats withDefaultTransformId() { - return new DataFrameIndexerTransformStats(DEFAULT_TRANSFORM_ID); - } - - public static DataFrameIndexerTransformStats withDefaultTransformId(long numPages, long numInputDocuments, long numOutputDocuments, - long numInvocations, long indexTime, long searchTime, - long indexTotal, long searchTotal, long indexFailures, - long searchFailures) { - return new DataFrameIndexerTransformStats(DEFAULT_TRANSFORM_ID, numPages, numInputDocuments, - numOutputDocuments, numInvocations, indexTime, searchTime, indexTotal, searchTotal, - indexFailures, searchFailures); - } - - public DataFrameIndexerTransformStats(String transformId) { + public DataFrameIndexerTransformStats() { super(); - this.transformId = Objects.requireNonNull(transformId, "parameter transformId must not be null"); } - public DataFrameIndexerTransformStats(String transformId, long numPages, long numInputDocuments, long numOutputDocuments, + public DataFrameIndexerTransformStats(long numPages, long numInputDocuments, long numOutputDocuments, long numInvocations, long indexTime, long searchTime, long indexTotal, long searchTotal, long indexFailures, long searchFailures) { super(numPages, numInputDocuments, numOutputDocuments, numInvocations, indexTime, searchTime, indexTotal, searchTotal, indexFailures, searchFailures); - this.transformId = Objects.requireNonNull(transformId, "parameter transformId must not be null"); } public DataFrameIndexerTransformStats(DataFrameIndexerTransformStats other) { - this(other.transformId, other.numPages, other.numInputDocuments, other.numOuputDocuments, other.numInvocations, + this(other.numPages, other.numInputDocuments, other.numOuputDocuments, other.numInvocations, other.indexTime, other.searchTime, other.indexTotal, other.searchTotal, other.indexFailures, other.searchFailures); } public DataFrameIndexerTransformStats(StreamInput in) throws IOException { super(in); - transformId = in.readString(); + if (in.getVersion().before(Version.V_8_0_0)) { // TODO change to 7.4.0 after backport + in.readString(); // was transformId + } } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeString(transformId); - } - - @Nullable - 
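The version-gated readString()/writeString(DEFAULT_TRANSFORM_ID) pair in this class is the standard wire-BWC shim: the field is gone from the object model, but older nodes still expect it in the stream, so it is discarded on read and written back as a constant placeholder. A condensed sketch of the pattern, with plain java.io streams as hypothetical stand-ins for Elasticsearch's StreamInput/StreamOutput:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

class StatsWireFormat {
    static final int V_CUTOVER = 8_00_00;        // hypothetical version constant
    static final String PLACEHOLDER_ID = "_all"; // written only for old peers

    long numPages;

    void readFrom(DataInput in, int peerVersion) throws IOException {
        numPages = in.readLong();                // the real payload
        if (peerVersion < V_CUTOVER) {
            in.readUTF();                        // was transformId; discard it
        }
    }

    void writeTo(DataOutput out, int peerVersion) throws IOException {
        out.writeLong(numPages);                 // the real payload
        if (peerVersion < V_CUTOVER) {
            out.writeUTF(PLACEHOLDER_ID);        // keep the old wire framing intact
        }
    }
}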
public String getTransformId() { - return transformId; + if (out.getVersion().before(Version.V_8_0_0)) { // TODO change to 7.4.0 after backport + out.writeString(DEFAULT_TRANSFORM_ID); + } } @Override @@ -126,21 +102,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(SEARCH_TIME_IN_MS.getPreferredName(), searchTime); builder.field(SEARCH_TOTAL.getPreferredName(), searchTotal); builder.field(SEARCH_FAILURES.getPreferredName(), searchFailures); - if (params.paramAsBoolean(DataFrameField.FOR_INTERNAL_STORAGE, false)) { - // If we are storing something, it should have a valid transform ID. - if (transformId.equals(DEFAULT_TRANSFORM_ID)) { - throw new IllegalArgumentException("when storing transform statistics, a valid transform id must be provided"); - } - builder.field(DataFrameField.ID.getPreferredName(), transformId); - } builder.endObject(); return builder; } public DataFrameIndexerTransformStats merge(DataFrameIndexerTransformStats other) { - // We should probably not merge two sets of stats unless one is an accumulation object (i.e. with the default transform id) - // or the stats are referencing the same transform - assert transformId.equals(DEFAULT_TRANSFORM_ID) || this.transformId.equals(other.transformId); numPages += other.numPages; numInputDocuments += other.numInputDocuments; numOuputDocuments += other.numOuputDocuments; @@ -167,8 +133,7 @@ public boolean equals(Object other) { DataFrameIndexerTransformStats that = (DataFrameIndexerTransformStats) other; - return Objects.equals(this.transformId, that.transformId) - && Objects.equals(this.numPages, that.numPages) + return Objects.equals(this.numPages, that.numPages) && Objects.equals(this.numInputDocuments, that.numInputDocuments) && Objects.equals(this.numOuputDocuments, that.numOuputDocuments) && Objects.equals(this.numInvocations, that.numInvocations) @@ -182,7 +147,7 @@ public boolean equals(Object other) { @Override public int hashCode() { - return Objects.hash(transformId, numPages, numInputDocuments, numOuputDocuments, numInvocations, + return Objects.hash(numPages, numInputDocuments, numOuputDocuments, numInvocations, indexTime, searchTime, indexFailures, searchFailures, indexTotal, searchTotal); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStats.java index 535b730aa0f26..865bc07931d86 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStats.java @@ -75,7 +75,7 @@ public static DataFrameTransformStats fromXContent(XContentParser parser) throws } public static DataFrameTransformStats initialStats(String id) { - return stoppedStats(id, new DataFrameIndexerTransformStats(id)); + return stoppedStats(id, new DataFrameIndexerTransformStats()); } public static DataFrameTransformStats stoppedStats(String id, DataFrameIndexerTransformStats indexerTransformStats) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerTransformStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerTransformStatsTests.java index 54fc5d5d45dc5..057e41e235420 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerTransformStatsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerTransformStatsTests.java @@ -7,19 +7,13 @@ package org.elasticsearch.xpack.core.dataframe.transforms; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractSerializingTestCase; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; import java.io.IOException; -import java.util.Collections; public class DataFrameIndexerTransformStatsTests extends AbstractSerializingTestCase { - protected static ToXContent.Params TO_XCONTENT_PARAMS = new ToXContent.MapParams( - Collections.singletonMap(DataFrameField.FOR_INTERNAL_STORAGE, "true")); - @Override protected DataFrameIndexerTransformStats createTestInstance() { return randomStats(); @@ -36,36 +30,26 @@ protected DataFrameIndexerTransformStats doParseInstance(XContentParser parser) } public static DataFrameIndexerTransformStats randomStats() { - return randomStats(randomAlphaOfLength(10)); - } - - public static DataFrameIndexerTransformStats randomStats(String transformId) { - return new DataFrameIndexerTransformStats(transformId, randomLongBetween(10L, 10000L), + return new DataFrameIndexerTransformStats(randomLongBetween(10L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L)); } - @Override - protected ToXContent.Params getToXContentParams() { - return TO_XCONTENT_PARAMS; - } - public void testMerge() throws IOException { - String transformId = randomAlphaOfLength(10); - DataFrameIndexerTransformStats emptyStats = new DataFrameIndexerTransformStats(transformId); - DataFrameIndexerTransformStats randomStats = randomStats(transformId); + DataFrameIndexerTransformStats emptyStats = new DataFrameIndexerTransformStats(); + DataFrameIndexerTransformStats randomStats = randomStats(); assertEquals(randomStats, emptyStats.merge(randomStats)); assertEquals(randomStats, randomStats.merge(emptyStats)); DataFrameIndexerTransformStats randomStatsClone = copyInstance(randomStats); - DataFrameIndexerTransformStats trippleRandomStats = new DataFrameIndexerTransformStats(transformId, 3 * randomStats.getNumPages(), + DataFrameIndexerTransformStats tripleRandomStats = new DataFrameIndexerTransformStats(3 * randomStats.getNumPages(), 3 * randomStats.getNumDocuments(), 3 * randomStats.getOutputDocuments(), 3 * randomStats.getNumInvocations(), 3 * randomStats.getIndexTime(), 3 * randomStats.getSearchTime(), 3 * randomStats.getIndexTotal(), 3 * randomStats.getSearchTotal(), 3 * randomStats.getIndexFailures(), 3 * randomStats.getSearchFailures()); - assertEquals(trippleRandomStats, randomStats.merge(randomStatsClone).merge(randomStatsClone)); + assertEquals(tripleRandomStats, randomStats.merge(randomStatsClone).merge(randomStatsClone)); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStatsTests.java index 8c708e67eacab..d9409bcec45e6 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStatsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStatsTests.java @@ -20,7 +20,7 @@ public static DataFrameTransformStats randomDataFrameTransformStats() { randomFrom(DataFrameTransformTaskState.values()), randomBoolean() ? null : randomAlphaOfLength(100), randomBoolean() ? null : NodeAttributeTests.randomNodeAttributes(), - DataFrameIndexerTransformStatsTests.randomStats(DataFrameIndexerTransformStats.DEFAULT_TRANSFORM_ID), + DataFrameIndexerTransformStatsTests.randomStats(), DataFrameTransformCheckpointingInfoTests.randomDataFrameTransformCheckpointingInfo()); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStoredDocTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStoredDocTests.java index 4998295a14b05..3466adf51a19b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStoredDocTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStoredDocTests.java @@ -22,7 +22,7 @@ public class DataFrameTransformStoredDocTests extends AbstractSerializingDataFra public static DataFrameTransformStoredDoc randomDataFrameTransformStoredDoc(String id) { return new DataFrameTransformStoredDoc(id, DataFrameTransformStateTests.randomDataFrameTransformState(), - DataFrameIndexerTransformStatsTests.randomStats(id)); + DataFrameIndexerTransformStatsTests.randomStats()); } public static DataFrameTransformStoredDoc randomDataFrameTransformStoredDoc() { diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrameInfoTransportAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrameInfoTransportAction.java index 31d5fd860e446..dc0e4eefd0a5f 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrameInfoTransportAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrameInfoTransportAction.java @@ -93,7 +93,7 @@ static DataFrameIndexerTransformStats parseSearchAggs(SearchResponse searchRespo statisticsList.add(0L); } } - return DataFrameIndexerTransformStats.withDefaultTransformId(statisticsList.get(0), // numPages + return new DataFrameIndexerTransformStats(statisticsList.get(0), // numPages statisticsList.get(1), // numInputDocuments statisticsList.get(2), // numOutputDocuments statisticsList.get(3), // numInvocations @@ -130,7 +130,7 @@ static void getStatisticSummations(Client client, ActionListener { if (failure instanceof ResourceNotFoundException) { - statsListener.onResponse(DataFrameIndexerTransformStats.withDefaultTransformId()); + statsListener.onResponse(new DataFrameIndexerTransformStats()); } else { statsListener.onFailure(failure); } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrameUsageTransportAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrameUsageTransportAction.java index 01e729e71bfcf..13482cc9190fe 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrameUsageTransportAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrameUsageTransportAction.java @@ 
-65,8 +65,7 @@ protected void masterOperation(Task task, XPackUsageRequest request, ClusterStat ActionListener listener) { boolean available = licenseState.isDataFrameAllowed(); if (enabled == false) { - var usage = new DataFrameFeatureSetUsage(available, enabled, Collections.emptyMap(), - DataFrameIndexerTransformStats.withDefaultTransformId()); + var usage = new DataFrameFeatureSetUsage(available, enabled, Collections.emptyMap(), new DataFrameIndexerTransformStats()); listener.onResponse(new XPackUsageFeatureResponse(usage)); return; } @@ -99,7 +98,7 @@ protected void masterOperation(Task task, XPackUsageRequest request, ClusterStat long totalTransforms = transformCountSuccess.getHits().getTotalHits().value; if (totalTransforms == 0) { var usage = new DataFrameFeatureSetUsage(available, enabled, transformsCountByState, - DataFrameIndexerTransformStats.withDefaultTransformId()); + new DataFrameIndexerTransformStats()); listener.onResponse(new XPackUsageFeatureResponse(usage)); return; } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java index efc9534cb2e33..0b7fb74155974 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java @@ -143,7 +143,7 @@ private void getPreview(Pivot pivot, r -> { try { final CompositeAggregation agg = r.getAggregations().get(COMPOSITE_AGGREGATION_NAME); - DataFrameIndexerTransformStats stats = DataFrameIndexerTransformStats.withDefaultTransformId(); + DataFrameIndexerTransformStats stats = new DataFrameIndexerTransformStats(); // remove all internal fields if (pipeline == null) { diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java index 5c20c1faab8f1..f007045807d77 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java @@ -166,7 +166,7 @@ public DataFrameTransformState getState() { public DataFrameIndexerTransformStats getStats() { if (getIndexer() == null) { - return new DataFrameIndexerTransformStats(getTransformId()); + return new DataFrameIndexerTransformStats(); } else { return getIndexer().getStats(); } @@ -425,7 +425,7 @@ static class ClientDataFrameIndexerBuilder { ClientDataFrameIndexerBuilder(String transformId) { this.transformId = transformId; - this.initialStats = new DataFrameIndexerTransformStats(transformId); + this.initialStats = new DataFrameIndexerTransformStats(); } ClientDataFrameIndexer build(DataFrameTransformTask parentTask) { @@ -551,7 +551,7 @@ static class ClientDataFrameIndexer extends DataFrameIndexer { fieldMappings, ExceptionsHelper.requireNonNull(initialState, "initialState"), initialPosition, - initialStats == null ? new DataFrameIndexerTransformStats(transformId) : initialStats, + initialStats == null ? 
new DataFrameIndexerTransformStats() : initialStats, transformProgress, lastCheckpoint, nextCheckpoint); diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/DataFrameInfoTransportActionTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/DataFrameInfoTransportActionTests.java index 8695b774a35d8..886a6760f5019 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/DataFrameInfoTransportActionTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/DataFrameInfoTransportActionTests.java @@ -78,9 +78,9 @@ public void testParseSearchAggs() { when(withEmptyAggs.getAggregations()).thenReturn(emptyAggs); assertThat(DataFrameInfoTransportAction.parseSearchAggs(withEmptyAggs), - equalTo(DataFrameIndexerTransformStats.withDefaultTransformId())); + equalTo(new DataFrameIndexerTransformStats())); - DataFrameIndexerTransformStats expectedStats = new DataFrameIndexerTransformStats("_all", + DataFrameIndexerTransformStats expectedStats = new DataFrameIndexerTransformStats( 1, // numPages 2, // numInputDocuments 3, // numOutputDocuments diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexerTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexerTests.java index 154588443cb2f..241a173d23250 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexerTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexerTests.java @@ -213,7 +213,7 @@ public void testPageSizeAdapt() throws InterruptedException { DataFrameAuditor auditor = new DataFrameAuditor(client, "node_1"); MockedDataFrameIndexer indexer = new MockedDataFrameIndexer(executor, config, Collections.emptyMap(), auditor, state, null, - new DataFrameIndexerTransformStats(config.getId()), searchFunction, bulkFunction, failureConsumer); + new DataFrameIndexerTransformStats(), searchFunction, bulkFunction, failureConsumer); final CountDownLatch latch = indexer.newLatch(1); indexer.start(); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java index 34a4b0d80b4e4..2135570dc5cd6 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java @@ -706,7 +706,7 @@ aggTypedName, asMap( "value", 122.55), DOC_COUNT, 44) )); - DataFrameIndexerTransformStats stats = DataFrameIndexerTransformStats.withDefaultTransformId(); + DataFrameIndexerTransformStats stats = new DataFrameIndexerTransformStats(); Map fieldTypeMap = asStringMap( aggName, "double", @@ -789,7 +789,7 @@ private void executeTest(GroupConfig groups, Map fieldTypeMap, List> expected, long expectedDocCounts) throws IOException { - DataFrameIndexerTransformStats stats = DataFrameIndexerTransformStats.withDefaultTransformId(); + DataFrameIndexerTransformStats stats = new DataFrameIndexerTransformStats(); XContentBuilder builder = 
XContentFactory.contentBuilder(randomFrom(XContentType.values())); builder.map(input); From 74d7fa898c7d7afc407463267852c66d2dcd9139 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Thu, 25 Jul 2019 13:50:33 +0300 Subject: [PATCH 15/51] SecurityIndexManager handle RuntimeEx while reading mapping (#44409) Fixes exception handling while reading and parsing `.security-*` mappings and templates. --- .../support/SecurityIndexManager.java | 120 +++++++++--------- .../support/SecurityIndexManagerTests.java | 21 +++ 2 files changed, 82 insertions(+), 59 deletions(-) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java index 769465c3080a7..3808d9d6efc48 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java @@ -118,7 +118,8 @@ private SecurityIndexManager(Client client, ClusterService clusterService, Strin clusterService.addListener(this); } - private SecurityIndexManager(Client client, String aliasName, String internalIndexName, int internalIndexFormat, + // protected for testing + protected SecurityIndexManager(Client client, String aliasName, String internalIndexName, int internalIndexFormat, Supplier mappingSourceSupplier, State indexState) { this.aliasName = aliasName; this.internalIndexName = internalIndexName; @@ -346,65 +347,68 @@ public void checkIndexVersionThenExecute(final Consumer consumer, fin */ public void prepareIndexIfNeededThenExecute(final Consumer consumer, final Runnable andThen) { final State indexState = this.indexState; // use a local copy so all checks execute against the same state! - // TODO we should improve this so we don't fire off a bunch of requests to do the same thing (create or update mappings) - if (indexState == State.UNRECOVERED_STATE) { - consumer.accept(new ElasticsearchStatusException( - "Cluster state has not been recovered yet, cannot write to the [" + indexState.concreteIndexName + "] index", - RestStatus.SERVICE_UNAVAILABLE)); - } else if (indexState.indexExists() && indexState.isIndexUpToDate == false) { - consumer.accept(new IllegalStateException( - "Index [" + indexState.concreteIndexName + "] is not on the current version." - + "Security features relying on the index will not be available until the upgrade API is run on the index")); - } else if (indexState.indexExists() == false) { - assert indexState.concreteIndexName != null; - logger.info("security index does not exist. 
Creating [{}] with alias [{}]", indexState.concreteIndexName, this.aliasName); - final byte[] mappingSource = mappingSourceSupplier.get(); - final Tuple mappingAndSettings = parseMappingAndSettingsFromTemplateBytes(mappingSource); - CreateIndexRequest request = new CreateIndexRequest(indexState.concreteIndexName) - .alias(new Alias(this.aliasName)) - .mapping(MapperService.SINGLE_MAPPING_NAME, mappingAndSettings.v1(), XContentType.JSON) - .waitForActiveShards(ActiveShardCount.ALL) - .settings(mappingAndSettings.v2()); - executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request, - new ActionListener() { - @Override - public void onResponse(CreateIndexResponse createIndexResponse) { - if (createIndexResponse.isAcknowledged()) { - andThen.run(); - } else { - consumer.accept(new ElasticsearchException("Failed to create security index")); - } - } - - @Override - public void onFailure(Exception e) { - final Throwable cause = ExceptionsHelper.unwrapCause(e); - if (cause instanceof ResourceAlreadyExistsException) { - // the index already exists - it was probably just created so this - // node hasn't yet received the cluster state update with the index - andThen.run(); - } else { - consumer.accept(e); - } + try { + // TODO we should improve this so we don't fire off a bunch of requests to do the same thing (create or update mappings) + if (indexState == State.UNRECOVERED_STATE) { + throw new ElasticsearchStatusException( + "Cluster state has not been recovered yet, cannot write to the [" + indexState.concreteIndexName + "] index", + RestStatus.SERVICE_UNAVAILABLE); + } else if (indexState.indexExists() && indexState.isIndexUpToDate == false) { + throw new IllegalStateException("Index [" + indexState.concreteIndexName + "] is not on the current version." + + "Security features relying on the index will not be available until the upgrade API is run on the index"); + } else if (indexState.indexExists() == false) { + assert indexState.concreteIndexName != null; + logger.info("security index does not exist. Creating [{}] with alias [{}]", indexState.concreteIndexName, this.aliasName); + final byte[] mappingSource = mappingSourceSupplier.get(); + final Tuple mappingAndSettings = parseMappingAndSettingsFromTemplateBytes(mappingSource); + CreateIndexRequest request = new CreateIndexRequest(indexState.concreteIndexName) + .alias(new Alias(this.aliasName)) + .mapping(MapperService.SINGLE_MAPPING_NAME, mappingAndSettings.v1(), XContentType.JSON) + .waitForActiveShards(ActiveShardCount.ALL) + .settings(mappingAndSettings.v2()); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request, + new ActionListener() { + @Override + public void onResponse(CreateIndexResponse createIndexResponse) { + if (createIndexResponse.isAcknowledged()) { + andThen.run(); + } else { + consumer.accept(new ElasticsearchException("Failed to create security index")); } - }, client.admin().indices()::create); - } else if (indexState.mappingUpToDate == false) { - logger.info("Index [{}] (alias [{}]) is not up to date. 
Updating mapping", indexState.concreteIndexName, this.aliasName); - final byte[] mappingSource = mappingSourceSupplier.get(); - final Tuple mappingAndSettings = parseMappingAndSettingsFromTemplateBytes(mappingSource); - PutMappingRequest request = new PutMappingRequest(indexState.concreteIndexName) - .source(mappingAndSettings.v1(), XContentType.JSON) - .type(MapperService.SINGLE_MAPPING_NAME); - executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request, - ActionListener.wrap(putMappingResponse -> { - if (putMappingResponse.isAcknowledged()) { + } + + @Override + public void onFailure(Exception e) { + final Throwable cause = ExceptionsHelper.unwrapCause(e); + if (cause instanceof ResourceAlreadyExistsException) { + // the index already exists - it was probably just created so this + // node hasn't yet received the cluster state update with the index andThen.run(); } else { - consumer.accept(new IllegalStateException("put mapping request was not acknowledged")); + consumer.accept(e); } - }, consumer), client.admin().indices()::putMapping); - } else { - andThen.run(); + } + }, client.admin().indices()::create); + } else if (indexState.mappingUpToDate == false) { + logger.info("Index [{}] (alias [{}]) is not up to date. Updating mapping", indexState.concreteIndexName, this.aliasName); + final byte[] mappingSource = mappingSourceSupplier.get(); + final Tuple mappingAndSettings = parseMappingAndSettingsFromTemplateBytes(mappingSource); + PutMappingRequest request = new PutMappingRequest(indexState.concreteIndexName) + .source(mappingAndSettings.v1(), XContentType.JSON) + .type(MapperService.SINGLE_MAPPING_NAME); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request, + ActionListener.wrap(putMappingResponse -> { + if (putMappingResponse.isAcknowledged()) { + andThen.run(); + } else { + consumer.accept(new IllegalStateException("put mapping request was not acknowledged")); + } + }, consumer), client.admin().indices()::putMapping); + } else { + andThen.run(); + } + } catch (Exception e) { + consumer.accept(e); } } @@ -428,7 +432,7 @@ private static byte[] readTemplateAsBytes(String templateName) { SecurityIndexManager.TEMPLATE_VERSION_PATTERN).getBytes(StandardCharsets.UTF_8); } - private static Tuple parseMappingAndSettingsFromTemplateBytes(byte[] template) { + private static Tuple parseMappingAndSettingsFromTemplateBytes(byte[] template) throws IOException { final PutIndexTemplateRequest request = new PutIndexTemplateRequest("name_is_not_important").source(template, XContentType.JSON); final String mappingSource = request.mappings().get(MapperService.SINGLE_MAPPING_NAME); try (XContentParser parser = XContentType.JSON.xContent().createParser(NamedXContentRegistry.EMPTY, @@ -441,8 +445,6 @@ private static Tuple parseMappingAndSettingsFromTemplateBytes( XContentBuilder builder = JsonXContent.contentBuilder(); builder.generator().copyCurrentStructure(parser); return new Tuple<>(Strings.toString(builder), request.settings()); - } catch (IOException e) { - throw ExceptionsHelper.convertToRuntime(e); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java index 3df7c95ee56ef..ce4670150c5b8 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java +++ 
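The heart of patch 15 is that prepareIndexIfNeededThenExecute now funnels synchronous failures, such as a RuntimeException thrown while parsing the security template, into the caller's error consumer instead of letting them escape up the stack. A condensed, runnable sketch of that route-don't-throw pattern, with java.util.function types standing in for the listener plumbing:

import java.util.function.Consumer;
import java.util.function.Supplier;

class PrepareIfNeeded {
    private final Supplier<byte[]> mappingSource;

    PrepareIfNeeded(Supplier<byte[]> mappingSource) {
        this.mappingSource = mappingSource;
    }

    void prepareThenExecute(Consumer<Exception> onFailure, Runnable andThen) {
        try {
            byte[] mapping = mappingSource.get(); // may throw while parsing
            // ... async create-index / put-mapping requests would go here ...
            andThen.run();
        } catch (Exception e) {
            onFailure.accept(e);                  // routed to the caller, not thrown
        }
    }

    public static void main(String[] args) {
        new PrepareIfNeeded(() -> { throw new RuntimeException("bad template"); })
            .prepareThenExecute(e -> System.out.println("failure handled: " + e),
                                () -> System.out.println("prepared"));
    }
}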
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java @@ -49,6 +49,7 @@ import org.junit.Before; import java.io.IOException; +import java.time.Instant; import java.util.Arrays; import java.util.HashMap; import java.util.LinkedHashMap; @@ -57,6 +58,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; +import java.util.function.Supplier; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_MAIN_TEMPLATE_7; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.TEMPLATE_VERSION_PATTERN; @@ -67,6 +69,8 @@ import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; public class SecurityIndexManagerTests extends ESTestCase { @@ -97,6 +101,23 @@ void doExecute(ActionType action, Request request, ActionListener mappingSourceSupplier = () -> { + throw new RuntimeException(); + }; + Runnable runnable = mock(Runnable.class); + manager = new SecurityIndexManager(mock(Client.class), RestrictedIndicesNames.SECURITY_MAIN_ALIAS, + RestrictedIndicesNames.INTERNAL_SECURITY_MAIN_INDEX_7, SecurityIndexManager.INTERNAL_MAIN_INDEX_FORMAT, + mappingSourceSupplier, state); + AtomicReference exceptionConsumer = new AtomicReference<>(); + manager.prepareIndexIfNeededThenExecute(e -> exceptionConsumer.set(e), runnable); + verify(runnable, never()).run(); + assertThat(exceptionConsumer.get(), is(notNullValue())); } public void testIndexWithUpToDateMappingAndTemplate() throws IOException { From 779009340054ef4d73bf62b59a871ab03037011c Mon Sep 17 00:00:00 2001 From: David Roberts Date: Thu, 25 Jul 2019 12:38:15 +0100 Subject: [PATCH 16/51] [ML-DataFrame] Muting tests for backport (#44850) Mutes data frame BWC tests prior to backporting #44768 --- .../test/mixed_cluster/80_data_frame_jobs_crud.yml | 12 ++++++++++++ .../test/old_cluster/80_data_frame_jobs_crud.yml | 6 ++++++ .../upgraded_cluster/80_data_frame_jobs_crud.yml | 6 ++++++ 3 files changed, 24 insertions(+) diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_data_frame_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_data_frame_jobs_crud.yml index abdd2dad6f916..96586d6823822 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_data_frame_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_data_frame_jobs_crud.yml @@ -1,5 +1,8 @@ --- "Test put batch data frame transforms on mixed cluster": + - skip: + version: "7.4.0 - " + reason: waiting backport of https://github.com/elastic/elasticsearch/pull/44768 - do: cluster.health: index: "dataframe-transform-airline-data" @@ -106,6 +109,9 @@ --- "Test put continuous data frame transform on mixed cluster": + - skip: + version: "7.4.0 - " + reason: waiting backport of https://github.com/elastic/elasticsearch/pull/44768 - do: cluster.health: index: "dataframe-transform-airline-data-cont" @@ -168,6 +174,9 @@ --- "Test GET, start, and stop old cluster batch transforms": + - skip: + version: "7.4.0 - " + reason: waiting backport of https://github.com/elastic/elasticsearch/pull/44768 - do: cluster.health: index: "dataframe-transform-airline-data" @@ -245,6 +254,9 @@ --- "Test 
GET, stop, start, old continuous transforms": + - skip: + version: "7.4.0 - " + reason: waiting backport of https://github.com/elastic/elasticsearch/pull/44768 - do: cluster.health: index: "dataframe-transform-airline-data-cont" diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/80_data_frame_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/80_data_frame_jobs_crud.yml index bf2b1f6b939f8..724d61f83a60c 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/80_data_frame_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/80_data_frame_jobs_crud.yml @@ -1,5 +1,8 @@ --- "Test put batch data frame transforms on old cluster": + - skip: + version: "7.4.0 - " + reason: waiting backport of https://github.com/elastic/elasticsearch/pull/44768 - do: indices.create: index: dataframe-transform-airline-data @@ -136,6 +139,9 @@ --- "Test put continuous data frame transform on old cluster": + - skip: + version: "7.4.0 - " + reason: waiting backport of https://github.com/elastic/elasticsearch/pull/44768 - do: indices.create: index: dataframe-transform-airline-data-cont diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/80_data_frame_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/80_data_frame_jobs_crud.yml index e149a249fff78..b8f349bf0dcbb 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/80_data_frame_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/80_data_frame_jobs_crud.yml @@ -7,6 +7,9 @@ setup: timeout: 70s --- "Get start, stop, and delete old and mixed cluster batch data frame transforms": + - skip: + version: "7.4.0 - " + reason: waiting backport of https://github.com/elastic/elasticsearch/pull/44768 # Simple and complex OLD transforms - do: data_frame.get_data_frame_transform: @@ -162,6 +165,9 @@ setup: --- "Test GET, stop, delete, old and mixed continuous transforms": + - skip: + version: "7.4.0 - " + reason: waiting backport of https://github.com/elastic/elasticsearch/pull/44768 - do: data_frame.get_data_frame_transform: transform_id: "old-simple-continuous-transform" From 5ba0fd3859a963e762093eb95a0cbb735dcbc38e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Przemys=C5=82aw=20Witek?= Date: Thu, 25 Jul 2019 15:59:52 +0200 Subject: [PATCH 17/51] Treat PostDataActionResponse.DataCounts.bucketCount as incremental rather than absolute (total). 
(#44803) --- .../core/ml/datafeed/DatafeedTimingStats.java | 4 ++-- .../ml/datafeed/DatafeedTimingStatsTests.java | 8 ++++---- .../datafeed/DatafeedTimingStatsReporter.java | 2 +- .../autodetect/AutodetectProcessManager.java | 2 +- .../DatafeedTimingStatsReporterTests.java | 19 ++++++++++--------- 5 files changed, 18 insertions(+), 17 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedTimingStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedTimingStats.java index 4e2d51b2ebacd..85b1659d721a6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedTimingStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedTimingStats.java @@ -114,8 +114,8 @@ public void incrementTotalSearchTimeMs(double searchTimeMs) { this.totalSearchTimeMs += searchTimeMs; } - public void setBucketCount(long bucketCount) { - this.bucketCount = bucketCount; + public void incrementBucketCount(long bucketCount) { + this.bucketCount += bucketCount; } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedTimingStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedTimingStatsTests.java index e56475705eab1..e8d7798ba6cc0 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedTimingStatsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedTimingStatsTests.java @@ -125,9 +125,9 @@ public void testIncrementTotalSearchTimeMs() { assertThat(stats.getAvgSearchTimePerBucketMs(), equalTo(30.0)); } - public void testSetBucketCount() { + public void testIncrementBucketCount() { DatafeedTimingStats stats = new DatafeedTimingStats(JOB_ID, 5, 10, 100.0); - stats.setBucketCount(20); + stats.incrementBucketCount(10); assertThat(stats.getJobId(), equalTo(JOB_ID)); assertThat(stats.getSearchCount(), equalTo(5L)); assertThat(stats.getBucketCount(), equalTo(20L)); @@ -141,7 +141,7 @@ public void testAvgSearchTimePerBucketIsCalculatedProperlyAfterUpdates() { assertThat(stats.getTotalSearchTimeMs(), equalTo(100.0)); assertThat(stats.getAvgSearchTimePerBucketMs(), equalTo(10.0)); - stats.setBucketCount(20); + stats.incrementBucketCount(10); assertThat(stats.getBucketCount(), equalTo(20L)); assertThat(stats.getTotalSearchTimeMs(), equalTo(100.0)); assertThat(stats.getAvgSearchTimePerBucketMs(), equalTo(5.0)); @@ -151,7 +151,7 @@ public void testAvgSearchTimePerBucketIsCalculatedProperlyAfterUpdates() { assertThat(stats.getTotalSearchTimeMs(), equalTo(300.0)); assertThat(stats.getAvgSearchTimePerBucketMs(), equalTo(15.0)); - stats.setBucketCount(25); + stats.incrementBucketCount(5); assertThat(stats.getBucketCount(), equalTo(25L)); assertThat(stats.getTotalSearchTimeMs(), equalTo(300.0)); assertThat(stats.getAvgSearchTimePerBucketMs(), equalTo(12.0)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporter.java index 202df616036f7..fbb32395f14ef 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporter.java @@ -57,7 +57,7 @@ public void reportDataCounts(DataCounts dataCounts) { if 
(dataCounts == null) { return; } - currentTimingStats.setBucketCount(dataCounts.getBucketCount()); + currentTimingStats.incrementBucketCount(dataCounts.getBucketCount()); flushIfDifferSignificantly(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java index c6c91bcad09be..4745228285e3b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java @@ -223,7 +223,7 @@ public void persistJob(JobTask jobTask, Consumer handler) { * @param input Data input stream * @param xContentType the {@link XContentType} of the input * @param params Data processing parameters - * @param handler Delegate error or datacount results (Count of records, fields, bytes, etc written) + * @param handler Delegate error or datacount results (Count of records, fields, bytes, etc written as a result of this call) */ public void processData(JobTask jobTask, AnalysisRegistry analysisRegistry, InputStream input, XContentType xContentType, DataLoadParams params, BiConsumer handler) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporterTests.java index e0aa9a696cd0e..9c48dd780a321 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporterTests.java @@ -94,22 +94,17 @@ public void testReportDataCounts_Null() { } public void testReportDataCounts() { - DataCounts dataCounts = new DataCounts(JOB_ID); - dataCounts.incrementBucketCount(20); DatafeedTimingStatsReporter timingStatsReporter = - new DatafeedTimingStatsReporter(new DatafeedTimingStats(JOB_ID, 3, dataCounts.getBucketCount(), 10000.0), jobResultsPersister); + new DatafeedTimingStatsReporter(new DatafeedTimingStats(JOB_ID, 3, 20, 10000.0), jobResultsPersister); assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 3, 20, 10000.0))); - dataCounts.incrementBucketCount(1); - timingStatsReporter.reportDataCounts(dataCounts); + timingStatsReporter.reportDataCounts(createDataCountsWithBucketCount(1)); assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 3, 21, 10000.0))); - dataCounts.incrementBucketCount(1); - timingStatsReporter.reportDataCounts(dataCounts); + timingStatsReporter.reportDataCounts(createDataCountsWithBucketCount(1)); assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 3, 22, 10000.0))); - dataCounts.incrementBucketCount(1); - timingStatsReporter.reportDataCounts(dataCounts); + timingStatsReporter.reportDataCounts(createDataCountsWithBucketCount(1)); assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 3, 23, 10000.0))); InOrder inOrder = inOrder(jobResultsPersister); @@ -118,6 +113,12 @@ public void testReportDataCounts() { verifyNoMoreInteractions(jobResultsPersister); } + private static DataCounts createDataCountsWithBucketCount(long bucketCount) { + DataCounts dataCounts = new DataCounts(JOB_ID); + 
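// each call hands the reporter a freshly built DataCounts, so the bucket count
+        // below is a per-call delta rather than a running total (the point of #44803)
+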
dataCounts.incrementBucketCount(bucketCount); + return dataCounts; + } + public void testTimingStatsDifferSignificantly() { assertThat( DatafeedTimingStatsReporter.differSignificantly( From bec16ef27877f7b19a59f8af7dc95b86a0621819 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Thu, 25 Jul 2019 15:34:13 +0100 Subject: [PATCH 18/51] [ML-DataFrame] Adjust data frame stats BWC following backport (#44852) This change adjusts the changes of #44768 to account for the backport to the 7.x branch in #44848. --- .../transforms/DataFrameIndexerTransformStats.java | 4 ++-- .../test/mixed_cluster/80_data_frame_jobs_crud.yml | 12 ------------ .../test/old_cluster/80_data_frame_jobs_crud.yml | 6 ------ .../upgraded_cluster/80_data_frame_jobs_crud.yml | 6 ------ 4 files changed, 2 insertions(+), 26 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerTransformStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerTransformStats.java index 1fc8c2b826350..4ad6173f37c78 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerTransformStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerTransformStats.java @@ -76,7 +76,7 @@ public DataFrameIndexerTransformStats(DataFrameIndexerTransformStats other) { public DataFrameIndexerTransformStats(StreamInput in) throws IOException { super(in); - if (in.getVersion().before(Version.V_8_0_0)) { // TODO change to 7.4.0 after backport + if (in.getVersion().before(Version.V_7_4_0)) { in.readString(); // was transformId } } @@ -84,7 +84,7 @@ public DataFrameIndexerTransformStats(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.V_8_0_0)) { // TODO change to 7.4.0 after backport + if (out.getVersion().before(Version.V_7_4_0)) { out.writeString(DEFAULT_TRANSFORM_ID); } } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_data_frame_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_data_frame_jobs_crud.yml index 96586d6823822..abdd2dad6f916 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_data_frame_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_data_frame_jobs_crud.yml @@ -1,8 +1,5 @@ --- "Test put batch data frame transforms on mixed cluster": - - skip: - version: "7.4.0 - " - reason: waiting backport of https://github.com/elastic/elasticsearch/pull/44768 - do: cluster.health: index: "dataframe-transform-airline-data" @@ -109,9 +106,6 @@ --- "Test put continuous data frame transform on mixed cluster": - - skip: - version: "7.4.0 - " - reason: waiting backport of https://github.com/elastic/elasticsearch/pull/44768 - do: cluster.health: index: "dataframe-transform-airline-data-cont" @@ -174,9 +168,6 @@ --- "Test GET, start, and stop old cluster batch transforms": - - skip: - version: "7.4.0 - " - reason: waiting backport of https://github.com/elastic/elasticsearch/pull/44768 - do: cluster.health: index: "dataframe-transform-airline-data" @@ -254,9 +245,6 @@ --- "Test GET, stop, start, old continuous transforms": - - skip: - version: "7.4.0 - " - reason: waiting backport of 
https://github.com/elastic/elasticsearch/pull/44768 - do: cluster.health: index: "dataframe-transform-airline-data-cont" diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/80_data_frame_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/80_data_frame_jobs_crud.yml index 724d61f83a60c..bf2b1f6b939f8 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/80_data_frame_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/80_data_frame_jobs_crud.yml @@ -1,8 +1,5 @@ --- "Test put batch data frame transforms on old cluster": - - skip: - version: "7.4.0 - " - reason: waiting backport of https://github.com/elastic/elasticsearch/pull/44768 - do: indices.create: index: dataframe-transform-airline-data @@ -139,9 +136,6 @@ --- "Test put continuous data frame transform on old cluster": - - skip: - version: "7.4.0 - " - reason: waiting backport of https://github.com/elastic/elasticsearch/pull/44768 - do: indices.create: index: dataframe-transform-airline-data-cont diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/80_data_frame_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/80_data_frame_jobs_crud.yml index b8f349bf0dcbb..e149a249fff78 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/80_data_frame_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/80_data_frame_jobs_crud.yml @@ -7,9 +7,6 @@ setup: timeout: 70s --- "Get start, stop, and delete old and mixed cluster batch data frame transforms": - - skip: - version: "7.4.0 - " - reason: waiting backport of https://github.com/elastic/elasticsearch/pull/44768 # Simple and complex OLD transforms - do: data_frame.get_data_frame_transform: @@ -165,9 +162,6 @@ setup: --- "Test GET, stop, delete, old and mixed continuous transforms": - - skip: - version: "7.4.0 - " - reason: waiting backport of https://github.com/elastic/elasticsearch/pull/44768 - do: data_frame.get_data_frame_transform: transform_id: "old-simple-continuous-transform" From 6e7d0614d9ccf46e8de6181616a1e8040083ef8e Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Thu, 25 Jul 2019 16:37:36 +0200 Subject: [PATCH 19/51] do not assert on indexer state (#44854) remove the unreliable check for the state change fixes #44813 --- .../xpack/core/indexing/AsyncTwoPhaseIndexerTests.java | 1 - 1 file changed, 1 deletion(-) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java index 1593dfbb7551e..883ac7c02486e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java @@ -393,7 +393,6 @@ public void testFiveRuns() throws InterruptedException { indexer.start(); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); - assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); assertTrue(awaitBusy(() -> isFinished.get())); indexer.assertCounters(); } finally { From 5275392b474960a7e6f900810bc7495c3bde728c Mon Sep 17 00:00:00 
2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Thu, 25 Jul 2019 16:45:06 +0200 Subject: [PATCH 20/51] [DOCS] Adds allow no datafeeds query param to the GET, GET stats and STOP datafeed APIs (#44499) --- .../apis/get-datafeed-stats.asciidoc | 25 +++++++++++++++++++ .../apis/get-datafeed.asciidoc | 24 ++++++++++++++++++ .../apis/stop-datafeed.asciidoc | 24 ++++++++++++++++++ 3 files changed, 73 insertions(+) diff --git a/docs/reference/ml/anomaly-detection/apis/get-datafeed-stats.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-datafeed-stats.asciidoc index 95cb7777b9b08..42db449e377e8 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-datafeed-stats.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-datafeed-stats.asciidoc @@ -49,6 +49,24 @@ IMPORTANT: This API returns a maximum of 10,000 {dfeeds}. or a wildcard expression. If you do not specify one of these options, the API returns statistics for all {dfeeds}. +[[ml-get-datafeed-stats-query-parms]] +==== {api-query-parms-title} + +`allow_no_datafeeds`:: + (Optional, boolean) Specifies what to do when the request: ++ +-- +* Contains wildcard expressions and there are no {datafeeds} that match. +* Contains the `_all` string or no identifiers and there are no matches. +* Contains wildcard expressions and there are only partial matches. + +The default value is `true`, which returns an empty `datafeeds` array when +there are no matches and the subset of results when there are partial matches. +If this parameter is `false`, the request returns a `404` status code when there +are no matches or only partial matches. +-- + + [[ml-get-datafeed-stats-results]] ==== {api-response-body-title} @@ -58,6 +76,13 @@ The API returns the following information: (array) An array of {dfeed} count objects. For more information, see <>. +[[ml-get-datafeed-stats-response-codes]] +==== {api-response-codes-title} + +`404` (Missing resources):: + If `allow_no_datafeeds` is `false`, this code indicates that there are no + resources that match the request or only partial matches for the request. + [[ml-get-datafeed-stats-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-datafeed.asciidoc index 6e93c1f9ce182..917599c4b941a 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-datafeed.asciidoc @@ -46,6 +46,23 @@ IMPORTANT: This API returns a maximum of 10,000 {dfeeds}. or a wildcard expression. If you do not specify one of these options, the API returns information about all {dfeeds}. +[[ml-get-datafeed-query-parms]] +==== {api-query-parms-title} + +`allow_no_datafeeds`:: + (Optional, boolean) Specifies what to do when the request: ++ +-- +* Contains wildcard expressions and there are no {datafeeds} that match. +* Contains the `_all` string or no identifiers and there are no matches. +* Contains wildcard expressions and there are only partial matches. + +The default value is `true`, which returns an empty `datafeeds` array when +there are no matches and the subset of results when there are partial matches. +If this parameter is `false`, the request returns a `404` status code when there +are no matches or only partial matches. +-- + [[ml-get-datafeed-results]] ==== {api-response-body-title} @@ -55,6 +72,13 @@ The API returns the following information: (array) An array of {dfeed} objects. For more information, see <>. 
+[[ml-get-datafeed-response-codes]] +==== {api-response-codes-title} + +`404` (Missing resources):: + If `allow_no_datafeeds` is `false`, this code indicates that there are no + resources that match the request or only partial matches for the request. + [[ml-get-datafeed-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/anomaly-detection/apis/stop-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/stop-datafeed.asciidoc index e0732b1428ff5..f849f0faf758b 100644 --- a/docs/reference/ml/anomaly-detection/apis/stop-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/stop-datafeed.asciidoc @@ -43,6 +43,23 @@ comma-separated list of {dfeeds} or a wildcard expression. You can close all (Required, string) Identifier for the {dfeed}. It can be a {dfeed} identifier or a wildcard expression. +[[ml-stop-datafeed-query-parms]] +==== {api-query-parms-title} + +`allow_no_datafeeds`:: + (Optional, boolean) Specifies what to do when the request: ++ +-- +* Contains wildcard expressions and there are no {datafeeds} that match. +* Contains the `_all` string or no identifiers and there are no matches. +* Contains wildcard expressions and there are only partial matches. + +The default value is `true`, which returns an empty `datafeeds` array when +there are no matches and the subset of results when there are partial matches. +If this parameter is `false`, the request returns a `404` status code when there +are no matches or only partial matches. +-- + [[ml-stop-datafeed-request-body]] ==== {api-request-body-title} @@ -53,6 +70,13 @@ comma-separated list of {dfeeds} or a wildcard expression. You can close all (Optional, time) Controls the amount of time to wait until a {dfeed} stops. The default value is 20 seconds. +[[ml-stop-datafeed-response-codes]] +==== {api-response-codes-title} + +`404` (Missing resources):: + If `allow_no_datafeeds` is `false`, this code indicates that there are no + resources that match the request or only partial matches for the request. + [[ml-stop-datafeed-example]] ==== {api-examples-title} From 0dfcdf327a2376ad0860854486a7f5561a000d8d Mon Sep 17 00:00:00 2001 From: Nicholas Knize Date: Thu, 25 Jul 2019 10:29:47 -0500 Subject: [PATCH 21/51] [GEO] Fix GeoShapeQueryBuilder to check for valid spatial relations Refactor left out the spatial strategy check in GeoShapeQueryBuilder.relation setter method. This commit adds that check back in. --- .../index/query/GeoShapeQueryBuilder.java | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java index f9bb9680bc777..57bdb4446beac 100644 --- a/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java @@ -173,6 +173,24 @@ public String getWriteableName() { return NAME; } + /** + * Sets the relation of query shape and indexed shape. 
+ * + * @param relation relation of the shapes + * @return this + */ + public GeoShapeQueryBuilder relation(ShapeRelation relation) { + if (relation == null) { + throw new IllegalArgumentException("No Shape Relation defined"); + } + if (SpatialStrategy.TERM.equals(strategy) && relation != ShapeRelation.INTERSECTS) { + throw new IllegalArgumentException("current strategy [" + strategy.getStrategyName() + "] only supports relation [" + + ShapeRelation.INTERSECTS.getRelationName() + "] found relation [" + relation.getRelationName() + "]"); + } + this.relation = relation; + return this; + } + /** * Defines which spatial strategy will be used for building the geo shape * Query. When not set, the strategy that will be used will be the one that From 990e037728a5c8c5f05d7ca14e2ba7c261b4557f Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Thu, 25 Jul 2019 08:58:16 -0700 Subject: [PATCH 22/51] [DOCS] Updates terms in anomaly detection job APIs (#44839) --- .../high-level/ml/close-job.asciidoc | 10 +++----- .../high-level/ml/delete-job.asciidoc | 25 +++++++++++-------- .../java-rest/high-level/ml/open-job.asciidoc | 11 ++++---- docs/java-rest/high-level/ml/put-job.asciidoc | 19 +++++++------- .../anomaly-detection/apis/close-job.asciidoc | 14 +++++------ .../apis/delete-job.asciidoc | 14 +++++------ .../anomaly-detection/apis/open-job.asciidoc | 11 ++++---- .../anomaly-detection/apis/put-job.asciidoc | 4 +-- 8 files changed, 54 insertions(+), 54 deletions(-) diff --git a/docs/java-rest/high-level/ml/close-job.asciidoc b/docs/java-rest/high-level/ml/close-job.asciidoc index 8a38b498629cf..bf14fa0f21a59 100644 --- a/docs/java-rest/high-level/ml/close-job.asciidoc +++ b/docs/java-rest/high-level/ml/close-job.asciidoc @@ -4,14 +4,12 @@ :response: CloseJobResponse -- [id="{upid}-{api}"] -=== Close Job API +=== Close {anomaly-job} API -The Close Job API provides the ability to close {ml} jobs in the cluster. -It accepts a +{request}+ object and responds -with a +{response}+ object. +Closes {anomaly-jobs} in the cluster. It accepts a +{request}+ object and responds with a +{response}+ object. [id="{upid}-{api}-request"] -==== Close Job Request +==== Close {anomaly-job} request A +{request}+ object gets created with an existing non-null `jobId`. @@ -28,7 +26,7 @@ which has not responded to its initial close request. execution should wait for the job to be closed. [id="{upid}-{api}-response"] -==== Close Job Response +==== Close {anomaly-job} response ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- diff --git a/docs/java-rest/high-level/ml/delete-job.asciidoc b/docs/java-rest/high-level/ml/delete-job.asciidoc index a8c6b276dd484..06c82c0a11258 100644 --- a/docs/java-rest/high-level/ml/delete-job.asciidoc +++ b/docs/java-rest/high-level/ml/delete-job.asciidoc @@ -4,10 +4,12 @@ :response: AcknowledgedResponse -- [id="{upid}-{api}"] -=== Delete Job API +=== Delete {anomaly-job} API + +Deletes an {anomaly-job} that exists in the cluster. [id="{upid}-{api}-request"] -==== Delete Job Request +==== Delete {anomaly-job} request A +{request}+ object requires a non-null `jobId` and can optionally set `force`. @@ -17,7 +19,7 @@ include-tagged::{doc-tests-file}[{api}-request] --------------------------------------------------- <1> Constructing a new request referencing an existing `jobId` -==== Optional Arguments +==== Optional arguments The following arguments are optional: @@ -33,21 +35,24 @@ Defaults to `false`. 
--------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request-wait-for-completion] --------------------------------------------------- -<1> Use to set whether the request should wait until the operation has completed before returning. -Defaults to `true`. +<1> Use to set whether the request should wait until the operation has completed +before returning. Defaults to `true`. [id="{upid}-{api}-response"] -==== Delete Job Response +==== Delete {anomaly-job} response -The returned +{response}+ object indicates the acknowledgement of the job deletion or -the deletion task depending on whether the request was set to wait for completion: +The returned +{response}+ object indicates the acknowledgement of the job +deletion or the deletion task depending on whether the request was set to wait +for completion: ["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- include-tagged::{doc-tests-file}[{api}-response] --------------------------------------------------- -<1> whether was job deletion was acknowledged or not; will be `null` when set not to wait for completion -<2> the id of the job deletion task; will be `null` when set to wait for completion +<1> whether was job deletion was acknowledged or not; will be `null` when set +not to wait for completion +<2> the id of the job deletion task; will be `null` when set to wait for +completion include::../execution.asciidoc[] diff --git a/docs/java-rest/high-level/ml/open-job.asciidoc b/docs/java-rest/high-level/ml/open-job.asciidoc index 9b3ec11a7cc13..b6f4e8ed1ebe4 100644 --- a/docs/java-rest/high-level/ml/open-job.asciidoc +++ b/docs/java-rest/high-level/ml/open-job.asciidoc @@ -4,14 +4,13 @@ :response: OpenJobResponse -- [id="{upid}-{api}"] -=== Open Job API +=== Open {anomaly-job} API -The Open Job API provides the ability to open {ml} jobs in the cluster. -It accepts a +{request}+ object and responds -with a +{response}+ object. +Opens {anomaly-jobs} in the cluster. It accepts a +{request}+ object and +responds with a +{response}+ object. [id="{upid}-{api}-request"] -==== Open Job Request +==== Open {anomaly-job} request An +{request}+ object gets created with an existing non-null `jobId`. @@ -24,7 +23,7 @@ include-tagged::{doc-tests-file}[{api}-request] execution should wait for the job to be opened. [id="{upid}-{api}-response"] -==== Open Job Response +==== Open {anomaly-job} response ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- diff --git a/docs/java-rest/high-level/ml/put-job.asciidoc b/docs/java-rest/high-level/ml/put-job.asciidoc index 9934fc6b94ab0..081c94782fa8a 100644 --- a/docs/java-rest/high-level/ml/put-job.asciidoc +++ b/docs/java-rest/high-level/ml/put-job.asciidoc @@ -4,14 +4,13 @@ :response: PutJobResponse -- [id="{upid}-{api}"] -=== Put Job API +=== Put {anomaly-job} API -The Put Job API can be used to create a new {ml} job -in the cluster. The API accepts a +{request}+ object +Creates a new {anomaly-job} in the cluster. The API accepts a +{request}+ object as a request and returns a +{response}+. 
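To see how these pieces fit together end to end, here is a minimal sketch: the job
id, function and field names are invented, `client` is an assumed, already built
`RestHighLevelClient`, and the usual `org.elasticsearch.client.ml` imports are
omitted.

    Detector.Builder detector = new Detector.Builder()
        .setFunction("sum")                       // hypothetical detector function
        .setFieldName("total");                   // hypothetical field to analyze
    AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(
            Collections.singletonList(detector.build()))
        .setBucketSpan(TimeValue.timeValueMinutes(10));
    DataDescription.Builder dataDescription = new DataDescription.Builder();
    dataDescription.setTimeField("timestamp");
    Job.Builder job = new Job.Builder("my-first-job")
        .setAnalysisConfig(analysisConfig)
        .setDataDescription(dataDescription);
    PutJobResponse response = client.machineLearning()
        .putJob(new PutJobRequest(job.build()), RequestOptions.DEFAULT);
    Job created = response.getResponse();         // the job as stored, defaults filled in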
[id="{upid}-{api}-request"] -==== Put Job Request +==== Put {anomaly-job} request A +{request}+ requires the following argument: @@ -19,12 +18,12 @@ A +{request}+ requires the following argument: -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request] -------------------------------------------------- -<1> The configuration of the {ml} job to create as a `Job` +<1> The configuration of the {anomaly-job} to create as a `Job` [id="{upid}-{api}-config"] -==== Job Configuration +==== Job configuration -The `Job` object contains all the details about the {ml} job +The `Job` object contains all the details about the {anomaly-job} configuration. A `Job` requires the following arguments: @@ -39,9 +38,9 @@ include-tagged::{doc-tests-file}[{api}-config] <4> Optionally, a human-readable description [id="{upid}-{api}-analysis-config"] -==== Analysis Configuration +==== Analysis configuration -The analysis configuration of the {ml} job is defined in the `AnalysisConfig`. +The analysis configuration of the {anomaly-job} is defined in the `AnalysisConfig`. `AnalysisConfig` reflects all the configuration settings that can be defined using the REST API. @@ -86,7 +85,7 @@ include-tagged::{doc-tests-file}[{api}-analysis-config] <3> The bucket span [id="{upid}-{api}-data-description"] -==== Data Description +==== Data description After defining the analysis config, the next thing to define is the data description, using a `DataDescription` instance. `DataDescription` diff --git a/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc index de64754b098d2..4bdabd3ce918e 100644 --- a/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc @@ -1,12 +1,12 @@ [role="xpack"] [testenv="platinum"] [[ml-close-job]] -=== Close jobs API +=== Close {anomaly-jobs} API ++++ Close jobs ++++ -Closes one or more jobs. +Closes one or more {anomaly-jobs}. A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis @@ -31,9 +31,9 @@ operations, but you can still explore and navigate results. [[ml-close-job-desc]] ==== {api-description-title} -You can close multiple jobs in a single API request by using a group name, a -comma-separated list of jobs, or a wildcard expression. You can close all jobs -by using `_all` or by specifying `*` as the ``. +You can close multiple {anomaly-jobs} in a single API request by using a group +name, a comma-separated list of jobs, or a wildcard expression. You can close +all jobs by using `_all` or by specifying `*` as the ``. When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. @@ -58,8 +58,8 @@ results the job might have recently produced or might produce in the future. ==== {api-path-parms-title} ``:: - (Required, string) Identifier for the job. It can be a job identifier, a group - name, or a wildcard expression. + (Required, string) Identifier for the {anomaly-job}. It can be a job + identifier, a group name, or a wildcard expression. 
[[ml-close-job-query-parms]] ==== {api-query-parms-title} diff --git a/docs/reference/ml/anomaly-detection/apis/delete-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-job.asciidoc index bd5a74e50e89a..506e224f94326 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-job.asciidoc @@ -1,12 +1,12 @@ [role="xpack"] [testenv="platinum"] [[ml-delete-job]] -=== Delete jobs API +=== Delete {anomaly-jobs} API ++++ Delete jobs ++++ -Deletes an existing anomaly detection job. +Deletes an existing {anomaly-job}. [[ml-delete-job-request]] ==== {api-request-title} @@ -25,14 +25,14 @@ cluster privileges to use this API. See All job configuration, model state and results are deleted. -IMPORTANT: Deleting a job must be done via this API only. Do not delete the -job directly from the `.ml-*` indices using the Elasticsearch delete document +IMPORTANT: Deleting an {anomaly-job} must be done via this API only. Do not +delete the job directly from the `.ml-*` indices using the {es} delete document API. When {es} {security-features} are enabled, make sure no `write` privileges are granted to anyone over the `.ml-*` indices. Before you can delete a job, you must delete the {dfeeds} that are associated -with it. See <>. Unless the `force` parameter -is used the job must be closed before it can be deleted. +with it. See <>. Unless the `force` +parameter is used the job must be closed before it can be deleted. It is not currently possible to delete multiple jobs using wildcards or a comma separated list. @@ -41,7 +41,7 @@ separated list. ==== {api-path-parms-title} ``:: - (Required, string) Identifier for the job. + (Required, string) Identifier for the {anomaly-job}. [[ml-delete-job-query-parms]] ==== {api-query-parms-title} diff --git a/docs/reference/ml/anomaly-detection/apis/open-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/open-job.asciidoc index 23ccd1586ed75..8761bf8342840 100644 --- a/docs/reference/ml/anomaly-detection/apis/open-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/open-job.asciidoc @@ -1,12 +1,12 @@ [role="xpack"] [testenv="platinum"] [[ml-open-job]] -=== Open jobs API +=== Open {anomaly-jobs} API ++++ Open jobs ++++ -Opens one or more jobs. +Opens one or more {anomaly-jobs}. [[ml-open-job-request]] ==== {api-request-title} @@ -23,8 +23,9 @@ Opens one or more jobs. [[ml-open-job-desc]] ==== {api-description-title} -A job must be opened in order for it to be ready to receive and analyze data. -A job can be opened and closed multiple times throughout its lifecycle. +An {anomaly-job} must be opened in order for it to be ready to receive and +analyze data. It can be opened and closed multiple times throughout its +lifecycle. When you open a new job, it starts with an empty model. @@ -36,7 +37,7 @@ data is received. ==== {api-path-parms-title} ``:: - (Required, string) Identifier for the job + (Required, string) Identifier for the {anomaly-job}. [[ml-open-job-request-body]] ==== {api-request-body-title} diff --git a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc index dd32bb108d784..8d4044c7d6c34 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc @@ -2,10 +2,8 @@ [testenv="platinum"] [[ml-put-job]] === Create {anomaly-jobs} API - -[subs="attributes"] ++++ -Create {anomaly-jobs} +Create jobs ++++ Instantiates an {anomaly-job}. 
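To make the delete-job options documented above concrete, a hedged sketch under the
same assumptions (hypothetical job id, assumed `client`):

    DeleteJobRequest request = new DeleteJobRequest("my-first-job");
    request.setForce(true);               // skip the close step: quicker, discards interim results
    request.setWaitForCompletion(false);  // return a deletion task id instead of blocking
    DeleteJobResponse response = client.machineLearning().deleteJob(request, RequestOptions.DEFAULT);
    TaskId task = response.getTask();                  // null when waiting for completion
    Boolean acknowledged = response.getAcknowledged(); // null when not waiting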
From c9b585eacbd8941d6c533bb3051042ff6feb35e3 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Thu, 25 Jul 2019 09:13:26 -0700 Subject: [PATCH 23/51] [DOCS] Fixes terms in HLRC data frame transform APIs (#44838) --- .../dataframe/delete_data_frame.asciidoc | 4 ++-- .../dataframe/get_data_frame.asciidoc | 14 +++++++------- .../dataframe/get_data_frame_stats.asciidoc | 15 ++++++++------- .../dataframe/preview_data_frame.asciidoc | 10 +++++----- .../dataframe/put_data_frame.asciidoc | 12 ++++++------ .../dataframe/start_data_frame.asciidoc | 16 +++++++++------- .../dataframe/stop_data_frame.asciidoc | 18 +++++++++--------- .../high-level/execution-no-req.asciidoc | 4 ++-- docs/java-rest/high-level/execution.asciidoc | 4 ++-- .../high-level/supported-apis.asciidoc | 6 ++++-- 10 files changed, 54 insertions(+), 49 deletions(-) diff --git a/docs/java-rest/high-level/dataframe/delete_data_frame.asciidoc b/docs/java-rest/high-level/dataframe/delete_data_frame.asciidoc index c90795c71fe07..4699d165e6015 100644 --- a/docs/java-rest/high-level/dataframe/delete_data_frame.asciidoc +++ b/docs/java-rest/high-level/dataframe/delete_data_frame.asciidoc @@ -4,10 +4,10 @@ :response: AcknowledgedResponse -- [id="{upid}-{api}"] -=== Delete Data Frame Transform API +=== Delete {dataframe-transform} API [id="{upid}-{api}-request"] -==== Delete Data Frame Transform Request +==== Delete {dataframe-transform} request A +{request}+ object requires a non-null `id`. diff --git a/docs/java-rest/high-level/dataframe/get_data_frame.asciidoc b/docs/java-rest/high-level/dataframe/get_data_frame.asciidoc index 995d9d2c08963..ba4103e2fc42e 100644 --- a/docs/java-rest/high-level/dataframe/get_data_frame.asciidoc +++ b/docs/java-rest/high-level/dataframe/get_data_frame.asciidoc @@ -4,16 +4,16 @@ :response: GetDataFrameTransformResponse -- [id="{upid}-{api}"] -=== Get Data Frame Transform API +=== Get {dataframe-transform} API -The Get Data Frame Transform API is used get one or more {dataframe-transform}. +The get {dataframe-transform} API is used get one or more {dataframe-transforms}. The API accepts a +{request}+ object and returns a +{response}+. [id="{upid}-{api}-request"] -==== Get Data Frame Request +==== Get {dataframe-transform} request -A +{request}+ requires either a data frame transform id, a comma separated list of ids or -the special wildcard `_all` to get all {dataframe-transforms} +A +{request}+ requires either a {dataframe-transform} ID, a comma separated list +of ids or the special wildcard `_all` to get all {dataframe-transforms}. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- @@ -21,7 +21,7 @@ include-tagged::{doc-tests-file}[{api}-request] -------------------------------------------------- <1> Constructing a new GET request referencing an existing {dataframe-transform} -==== Optional Arguments +==== Optional arguments The following arguments are optional. @@ -40,7 +40,7 @@ include::../execution.asciidoc[] [id="{upid}-{api}-response"] ==== Response -The returned +{response}+ contains the requested {dataframe-transform}s. +The returned +{response}+ contains the requested {dataframe-transforms}. 
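Concretely, the round trip might look like this sketch (invented transform id;
`client` and imports assumed as above):

    GetDataFrameTransformRequest request = new GetDataFrameTransformRequest("my-transform");
    GetDataFrameTransformResponse response =
        client.dataFrame().getDataFrameTransform(request, RequestOptions.DEFAULT);
    List<DataFrameTransformConfig> configs = response.getTransformConfigurations();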
["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- diff --git a/docs/java-rest/high-level/dataframe/get_data_frame_stats.asciidoc b/docs/java-rest/high-level/dataframe/get_data_frame_stats.asciidoc index dbed7971ff277..4c68d352606b8 100644 --- a/docs/java-rest/high-level/dataframe/get_data_frame_stats.asciidoc +++ b/docs/java-rest/high-level/dataframe/get_data_frame_stats.asciidoc @@ -4,17 +4,17 @@ :response: GetDataFrameTransformStatsResponse -- [id="{upid}-{api}"] -=== Get Data Frame Transform Stats API +=== Get {dataframe-transform} stats API -The Get Data Frame Transform Stats API is used read the operational statistics -of one or more {dataframe-transform}s. +The get {dataframe-transform} stats API is used read the operational statistics +of one or more {dataframe-transforms}. The API accepts a +{request}+ object and returns a +{response}+. [id="{upid}-{api}-request"] -==== Get Data Frame Transform Stats Request +==== Get {dataframe-transform} stats request A +{request}+ requires a data frame transform id or the special wildcard `_all` -to get the statistics for all {dataframe-transform}s +to get the statistics for all {dataframe-transforms}. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- @@ -30,8 +30,9 @@ The following arguments are optional. -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request-options] -------------------------------------------------- -<1> The page parameters `from` and `size`. `from` specifies the number of data frame transform stats to skip. -`size` specifies the maximum number of data frame transform stats to get. +<1> The page parameters `from` and `size`. `from` specifies the number of +{dataframe-transform} stats to skip. +`size` specifies the maximum number of {dataframe-transform} stats to get. Defaults to `0` and `100` respectively. <2> Whether to ignore if a wildcard expression matches no transforms. diff --git a/docs/java-rest/high-level/dataframe/preview_data_frame.asciidoc b/docs/java-rest/high-level/dataframe/preview_data_frame.asciidoc index df7e461fa0919..88f5fc9ad2e34 100644 --- a/docs/java-rest/high-level/dataframe/preview_data_frame.asciidoc +++ b/docs/java-rest/high-level/dataframe/preview_data_frame.asciidoc @@ -4,17 +4,17 @@ :response: PreviewDataFrameTransformResponse -- [id="{upid}-{api}"] -=== Preview Data Frame Transform API +=== Preview {dataframe-transform} API -The Preview Data Frame Transform API is used to preview the results of +The preview {dataframe-transform} API is used to preview the results of a {dataframe-transform}. The API accepts a +{request}+ object as a request and returns a +{response}+. [id="{upid}-{api}-request"] -==== Preview Data Frame Request +==== Preview {dataframe-transform} request -A +{request}+ takes a single argument: a valid data frame transform config. +A +{request}+ takes a single argument: a valid {dataframe-transform} config. 
["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- @@ -22,7 +22,7 @@ include-tagged::{doc-tests-file}[{api}-request] -------------------------------------------------- <1> The source config from which the data should be gathered <2> The pivot config used to transform the data -<3> The configuration of the {dataframe-job} to preview +<3> The configuration of the {dataframe-transform} to preview include::../execution.asciidoc[] diff --git a/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc b/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc index 50362d2fc4a07..293722aa65bec 100644 --- a/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc +++ b/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc @@ -4,14 +4,14 @@ :response: AcknowledgedResponse -- [id="{upid}-{api}"] -=== Put Data Frame Transform API +=== Put {dataframe-transform} API -The Put Data Frame Transform API is used to create a new {dataframe-transform}. +The put {dataframe-transform} API is used to create a new {dataframe-transform}. The API accepts a +{request}+ object as a request and returns a +{response}+. [id="{upid}-{api}-request"] -==== Put Data Frame Request +==== Put {dataframe-transform} request A +{request}+ requires the following argument: @@ -26,10 +26,10 @@ with the privileges of the user creating it. Meaning, if they do not have privil such an error will not be visible until `_start` is called. [id="{upid}-{api}-config"] -==== Data Frame Transform Configuration +==== {dataframe-transform-cap} configuration -The `DataFrameTransformConfig` object contains all the details about the {dataframe-transform} -configuration and contains the following arguments: +The `DataFrameTransformConfig` object contains all the details about the +{dataframe-transform} configuration and contains the following arguments: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- diff --git a/docs/java-rest/high-level/dataframe/start_data_frame.asciidoc b/docs/java-rest/high-level/dataframe/start_data_frame.asciidoc index 6e18eb877289b..eeb2c2eeb6112 100644 --- a/docs/java-rest/high-level/dataframe/start_data_frame.asciidoc +++ b/docs/java-rest/high-level/dataframe/start_data_frame.asciidoc @@ -4,13 +4,13 @@ :response: StartDataFrameTransformResponse -- [id="{upid}-{api}"] -=== Start Data Frame Transform API +=== Start {dataframe-transform} API -Start a {dataframe-job}. +Start a {dataframe-transform}. It accepts a +{request}+ object and responds with a +{response}+ object. [id="{upid}-{api}-request"] -==== Start Data Frame Request +==== Start {dataframe-transform} request A +{request}+ object requires a non-null `id`. @@ -18,9 +18,10 @@ A +{request}+ object requires a non-null `id`. --------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request] --------------------------------------------------- -<1> Constructing a new start request referencing an existing {dataframe-job} +<1> Constructing a new start request referencing an existing +{dataframe-transform} -==== Optional Arguments +==== Optional arguments The following arguments are optional. @@ -28,10 +29,11 @@ The following arguments are optional. -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request-options] -------------------------------------------------- -<1> Controls the amount of time to wait until the {dataframe-job} starts. 
+<1> Controls the amount of time to wait until the {dataframe-transform} starts. include::../execution.asciidoc[] ==== Response -The returned +{response}+ object acknowledges the {dataframe-job} has started. \ No newline at end of file +The returned +{response}+ object acknowledges the {dataframe-transform} has +started. \ No newline at end of file diff --git a/docs/java-rest/high-level/dataframe/stop_data_frame.asciidoc b/docs/java-rest/high-level/dataframe/stop_data_frame.asciidoc index 1de4af5c5d592..60981a17a8fb2 100644 --- a/docs/java-rest/high-level/dataframe/stop_data_frame.asciidoc +++ b/docs/java-rest/high-level/dataframe/stop_data_frame.asciidoc @@ -4,25 +4,25 @@ :response: StopDataFrameTransformResponse -- [id="{upid}-{api}"] -=== Stop Data Frame Transform API +=== Stop {dataframe-transform} API -Stop a started {dataframe-job}. +Stop a started {dataframe-transform}. It accepts a +{request}+ object and responds with a +{response}+ object. [id="{upid}-{api}-request"] -==== Stop Data Frame Request +==== Stop {dataframe-transform} request -A +{request}+ object requires a non-null `id`. `id` can be a comma separated list of Ids -or a single Id. Wildcards, `*` and `_all` are also accepted. +A +{request}+ object requires a non-null `id`. `id` can be a comma separated +list of IDs or a single ID. Wildcards, `*` and `_all` are also accepted. ["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request] --------------------------------------------------- -<1> Constructing a new stop request referencing an existing {dataframe-job} +<1> Constructing a new stop request referencing an existing {dataframe-transform} -==== Optional Arguments +==== Optional arguments The following arguments are optional. @@ -31,11 +31,11 @@ The following arguments are optional. include-tagged::{doc-tests-file}[{api}-request-options] -------------------------------------------------- <1> If true wait for the data frame task to stop before responding -<2> Controls the amount of time to wait until the {dataframe-job} stops. +<2> Controls the amount of time to wait until the {dataframe-transform} stops. <3> Whether to ignore if a wildcard expression matches no transforms. include::../execution.asciidoc[] ==== Response -The returned +{response}+ object acknowledges the {dataframe-job} has stopped. \ No newline at end of file +The returned +{response}+ object acknowledges the {dataframe-transform} has stopped. \ No newline at end of file diff --git a/docs/java-rest/high-level/execution-no-req.asciidoc b/docs/java-rest/high-level/execution-no-req.asciidoc index 21ae5a909bde9..e9a2780d1bc31 100644 --- a/docs/java-rest/high-level/execution-no-req.asciidoc +++ b/docs/java-rest/high-level/execution-no-req.asciidoc @@ -5,7 +5,7 @@ For methods with requests, see execution.asciidoc //// [id="{upid}-{api}-sync"] -==== Synchronous Execution +==== Synchronous execution When executing the +{api}+ API in the following manner, the client waits for the +{response}+ to be returned before continuing with code execution: @@ -25,7 +25,7 @@ a generic `ElasticsearchException` and adds the original `ResponseException` as suppressed exception to it. [id="{upid}-{api}-async"] -==== Asynchronous Execution +==== Asynchronous execution The +{api}+ API can also be called in an asynchronous fashion so that the client can return directly. 
Users need to specify how the response or diff --git a/docs/java-rest/high-level/execution.asciidoc b/docs/java-rest/high-level/execution.asciidoc index 1028d9b6975c7..cbc44a24f6c98 100644 --- a/docs/java-rest/high-level/execution.asciidoc +++ b/docs/java-rest/high-level/execution.asciidoc @@ -8,7 +8,7 @@ test. //// [id="{upid}-{api}-sync"] -==== Synchronous Execution +==== Synchronous execution When executing a +{request}+ in the following manner, the client waits for the +{response}+ to be returned before continuing with code execution: @@ -28,7 +28,7 @@ a generic `ElasticsearchException` and adds the original `ResponseException` as suppressed exception to it. [id="{upid}-{api}-async"] -==== Asynchronous Execution +==== Asynchronous execution Executing a +{request}+ can also be done in an asynchronous fashion so that the client can return directly. Users need to specify how the response or diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 27f5f38136d2e..0aecc5f0021fc 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -564,12 +564,14 @@ include::ilm/lifecycle_management_status.asciidoc[] include::ilm/retry_lifecycle_policy.asciidoc[] include::ilm/remove_lifecycle_policy_from_index.asciidoc[] -== Data Frame APIs +[[_data_frame_transform_apis]] +== {dataframe-transform-cap} APIs :upid: {mainid}-dataframe :doc-tests-file: {doc-tests}/DataFrameTransformDocumentationIT.java -The Java High Level REST Client supports the following Data Frame APIs: +The Java High Level REST Client supports the following {dataframe-transform} +APIs: * <<{upid}-get-data-frame-transform>> * <<{upid}-get-data-frame-transform-stats>> From 9b164866152f01b7669d254f68ea2b6d640d3713 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Thu, 25 Jul 2019 10:00:06 -0700 Subject: [PATCH 24/51] [DOCS] Minor edits to HLRC ML APIs (#44865) --- .../high-level/dataframe/delete_data_frame.asciidoc | 2 ++ .../high-level/dataframe/get_data_frame.asciidoc | 2 +- .../dataframe/get_data_frame_stats.asciidoc | 5 ++--- .../dataframe/preview_data_frame.asciidoc | 3 +-- .../high-level/dataframe/put_data_frame.asciidoc | 2 +- .../high-level/dataframe/start_data_frame.asciidoc | 2 +- .../high-level/dataframe/stop_data_frame.asciidoc | 2 +- docs/java-rest/high-level/ml/delete-job.asciidoc | 13 ++++++------- 8 files changed, 15 insertions(+), 16 deletions(-) diff --git a/docs/java-rest/high-level/dataframe/delete_data_frame.asciidoc b/docs/java-rest/high-level/dataframe/delete_data_frame.asciidoc index 4699d165e6015..5f04aa03718b5 100644 --- a/docs/java-rest/high-level/dataframe/delete_data_frame.asciidoc +++ b/docs/java-rest/high-level/dataframe/delete_data_frame.asciidoc @@ -6,6 +6,8 @@ [id="{upid}-{api}"] === Delete {dataframe-transform} API +Deletes an existing {dataframe-transform}. + [id="{upid}-{api}-request"] ==== Delete {dataframe-transform} request diff --git a/docs/java-rest/high-level/dataframe/get_data_frame.asciidoc b/docs/java-rest/high-level/dataframe/get_data_frame.asciidoc index ba4103e2fc42e..160dc378e729a 100644 --- a/docs/java-rest/high-level/dataframe/get_data_frame.asciidoc +++ b/docs/java-rest/high-level/dataframe/get_data_frame.asciidoc @@ -6,7 +6,7 @@ [id="{upid}-{api}"] === Get {dataframe-transform} API -The get {dataframe-transform} API is used get one or more {dataframe-transforms}. +Retrieves configuration information about one or more {dataframe-transforms}. 
The API accepts a +{request}+ object and returns a +{response}+. [id="{upid}-{api}-request"] diff --git a/docs/java-rest/high-level/dataframe/get_data_frame_stats.asciidoc b/docs/java-rest/high-level/dataframe/get_data_frame_stats.asciidoc index 4c68d352606b8..578ea808b9e3d 100644 --- a/docs/java-rest/high-level/dataframe/get_data_frame_stats.asciidoc +++ b/docs/java-rest/high-level/dataframe/get_data_frame_stats.asciidoc @@ -6,8 +6,7 @@ [id="{upid}-{api}"] === Get {dataframe-transform} stats API -The get {dataframe-transform} stats API is used read the operational statistics -of one or more {dataframe-transforms}. +Retrieves the operational statistics of one or more {dataframe-transforms}. The API accepts a +{request}+ object and returns a +{response}+. [id="{upid}-{api}-request"] @@ -22,7 +21,7 @@ include-tagged::{doc-tests-file}[{api}-request] -------------------------------------------------- <1> Constructing a new GET Stats request referencing an existing {dataframe-transform} -==== Optional Arguments +==== Optional arguments The following arguments are optional. diff --git a/docs/java-rest/high-level/dataframe/preview_data_frame.asciidoc b/docs/java-rest/high-level/dataframe/preview_data_frame.asciidoc index 88f5fc9ad2e34..26453e5d4968f 100644 --- a/docs/java-rest/high-level/dataframe/preview_data_frame.asciidoc +++ b/docs/java-rest/high-level/dataframe/preview_data_frame.asciidoc @@ -6,8 +6,7 @@ [id="{upid}-{api}"] === Preview {dataframe-transform} API -The preview {dataframe-transform} API is used to preview the results of -a {dataframe-transform}. +Previews the results of a {dataframe-transform}. The API accepts a +{request}+ object as a request and returns a +{response}+. diff --git a/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc b/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc index 293722aa65bec..2de25fde30e2c 100644 --- a/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc +++ b/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc @@ -6,7 +6,7 @@ [id="{upid}-{api}"] === Put {dataframe-transform} API -The put {dataframe-transform} API is used to create a new {dataframe-transform}. +Creates a new {dataframe-transform}. The API accepts a +{request}+ object as a request and returns a +{response}+. diff --git a/docs/java-rest/high-level/dataframe/start_data_frame.asciidoc b/docs/java-rest/high-level/dataframe/start_data_frame.asciidoc index eeb2c2eeb6112..b62f410180c1d 100644 --- a/docs/java-rest/high-level/dataframe/start_data_frame.asciidoc +++ b/docs/java-rest/high-level/dataframe/start_data_frame.asciidoc @@ -6,7 +6,7 @@ [id="{upid}-{api}"] === Start {dataframe-transform} API -Start a {dataframe-transform}. +Starts a {dataframe-transform}. It accepts a +{request}+ object and responds with a +{response}+ object. [id="{upid}-{api}-request"] diff --git a/docs/java-rest/high-level/dataframe/stop_data_frame.asciidoc b/docs/java-rest/high-level/dataframe/stop_data_frame.asciidoc index 60981a17a8fb2..af364501d0d41 100644 --- a/docs/java-rest/high-level/dataframe/stop_data_frame.asciidoc +++ b/docs/java-rest/high-level/dataframe/stop_data_frame.asciidoc @@ -6,7 +6,7 @@ [id="{upid}-{api}"] === Stop {dataframe-transform} API -Stop a started {dataframe-transform}. +Stops a started {dataframe-transform}. It accepts a +{request}+ object and responds with a +{response}+ object. 
[id="{upid}-{api}-request"] diff --git a/docs/java-rest/high-level/ml/delete-job.asciidoc b/docs/java-rest/high-level/ml/delete-job.asciidoc index 06c82c0a11258..300b3edef686b 100644 --- a/docs/java-rest/high-level/ml/delete-job.asciidoc +++ b/docs/java-rest/high-level/ml/delete-job.asciidoc @@ -27,9 +27,8 @@ The following arguments are optional: --------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request-force] --------------------------------------------------- -<1> Use to forcefully delete an opened job; -this method is quicker than closing and deleting the job. -Defaults to `false`. +<1> Use to forcefully delete an opened job. This method is quicker than closing +and deleting the job. Defaults to `false`. ["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- @@ -50,9 +49,9 @@ for completion: --------------------------------------------------- include-tagged::{doc-tests-file}[{api}-response] --------------------------------------------------- -<1> whether was job deletion was acknowledged or not; will be `null` when set -not to wait for completion -<2> the id of the job deletion task; will be `null` when set to wait for -completion +<1> Whether job deletion was acknowledged or not. It will be `null` when set +to not wait for completion. +<2> The ID of the job deletion task. It will be `null` when set to wait for +completion. include::../execution.asciidoc[] From 729aca5b8d5a71ccac6cedac820daa1a81c64927 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Thu, 25 Jul 2019 10:34:37 -0700 Subject: [PATCH 25/51] Fix an NPE when requesting inner hits and _source is disabled. (#44836) This PR makes two changes to FetchSourceSubPhase when _source is disabled and we're in a nested context: * If no source filters are provided, return early to avoid an NPE. * If there are source filters, make sure to throw an exception. The behavior was chosen to match what currently happens in a non-nested context. --- .../fetch/subphase/FetchSourceSubPhase.java | 24 +++++++++++++------ .../subphase/FetchSourceSubPhaseTests.java | 11 +++++++++ 2 files changed, 28 insertions(+), 7 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java index a7f333abfa2ef..fa099392f40e7 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java @@ -42,17 +42,23 @@ public void hitExecute(SearchContext context, HitContext hitContext) { SourceLookup source = context.lookup().source(); FetchSourceContext fetchSourceContext = context.fetchSourceContext(); assert fetchSourceContext.fetchSource(); - if (nestedHit == false) { - if (fetchSourceContext.includes().length == 0 && fetchSourceContext.excludes().length == 0) { - hitContext.hit().sourceRef(source.internalSourceRef()); - return; - } - if (source.internalSourceRef() == null) { + + // If source is disabled in the mapping, then attempt to return early. 
+ if (source.source() == null && source.internalSourceRef() == null) { + if (containsFilters(fetchSourceContext)) { throw new IllegalArgumentException("unable to fetch fields from _source field: _source is disabled in the mappings " + - "for index [" + context.indexShard().shardId().getIndexName() + "]"); + "for index [" + context.indexShard().shardId().getIndexName() + "]"); } + return; + } + + // If this is a parent document and there are no source filters, then add the source as-is. + if (nestedHit == false && containsFilters(fetchSourceContext) == false) { + hitContext.hit().sourceRef(source.internalSourceRef()); + return; } + // Otherwise, filter the source and add it to the hit. Object value = source.filter(fetchSourceContext); if (nestedHit) { value = getNestedSource((Map) value, hitContext); @@ -79,6 +85,10 @@ public void hitExecute(SearchContext context, HitContext hitContext) { } } + private static boolean containsFilters(FetchSourceContext context) { + return context.includes().length != 0 || context.excludes().length != 0; + } + private Map getNestedSource(Map sourceAsMap, HitContext hitContext) { for (SearchHit.NestedIdentity o = hitContext.hit().getNestedIdentity(); o != null; o = o.getChild()) { sourceAsMap = (Map) sourceAsMap.get(o.getField().string()); diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java index 4b62e77393865..f8cb5f751361c 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java @@ -119,6 +119,17 @@ public void testSourceDisabled() throws IOException { "for index [index]", exception.getMessage()); } + public void testNestedSourceWithSourceDisabled() { + FetchSubPhase.HitContext hitContext = hitExecute(null, true, null, null, + new SearchHit.NestedIdentity("nested1", 0, null)); + assertNull(hitContext.hit().getSourceAsMap()); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> hitExecute(null, true, "field1", null, new SearchHit.NestedIdentity("nested1", 0, null))); + assertEquals("unable to fetch fields from _source field: _source is disabled in the mappings " + + "for index [index]", e.getMessage()); + } + private FetchSubPhase.HitContext hitExecute(XContentBuilder source, boolean fetchSource, String include, String exclude) { return hitExecute(source, fetchSource, include, exclude, null); } From 76fcc8127573c6b292fd6905361aba75e19b120c Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Thu, 25 Jul 2019 20:17:51 +0200 Subject: [PATCH 26/51] Add Clone Index API (#44267) Adds an API to clone an index. This is similar to the index split and shrink APIs, just with the difference that the number of primary shards is kept the same. In cases where the filesystem provides hard-linking capabilities, this is a very cheap operation. Index cloning can be done by running `POST my_source_index/_clone/my_target_index` and it supports the same options as the split and shrink APIs.
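As a rough usage sketch (not part of this change itself), the high-level REST client methods added below can be called as follows, assuming a configured `RestHighLevelClient` named `client`:

    // clone my_source_index into my_target_index using the HLRC additions in this patch
    ResizeRequest request = new ResizeRequest("my_target_index", "my_source_index");
    request.setResizeType(ResizeType.CLONE);
    ResizeResponse response = client.indices().clone(request, RequestOptions.DEFAULT);
    boolean acknowledged = response.isAcknowledged();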
Closes #44128 --- .../elasticsearch/client/IndicesClient.java | 27 ++++ .../client/IndicesRequestConverters.java | 7 + .../elasticsearch/client/IndicesClientIT.java | 24 +++ .../client/IndicesRequestConvertersTests.java | 23 ++- .../IndicesClientDocumentationIT.java | 69 +++++++++ .../high-level/indices/clone_index.asciidoc | 80 ++++++++++ .../high-level/supported-apis.asciidoc | 2 + docs/reference/indices.asciidoc | 3 + docs/reference/indices/clone-index.asciidoc | 138 ++++++++++++++++++ .../rest-api-spec/api/indices.clone.json | 39 +++++ .../test/indices.clone/10_basic.yml | 111 ++++++++++++++ .../test/indices.clone/20_source_mapping.yml | 65 +++++++++ .../test/indices.clone/30_copy_settings.yml | 61 ++++++++ .../elasticsearch/action/ActionModule.java | 1 + .../admin/indices/shrink/ResizeRequest.java | 4 + .../admin/indices/shrink/ResizeType.java | 2 +- .../indices/shrink/TransportResizeAction.java | 15 +- .../cluster/metadata/IndexMetaData.java | 19 ++- .../metadata/MetaDataCreateIndexService.java | 16 +- .../decider/ResizeAllocationDecider.java | 6 +- .../admin/indices/RestResizeHandler.java | 20 +++ .../admin/indices/create/CloneIndexIT.java | 126 ++++++++++++++++ .../cluster/metadata/IndexMetaDataTests.java | 3 +- 23 files changed, 844 insertions(+), 17 deletions(-) create mode 100644 docs/java-rest/high-level/indices/clone_index.asciidoc create mode 100644 docs/reference/indices/clone-index.asciidoc create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/indices.clone.json create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/20_source_mapping.yml create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/30_copy_settings.yml create mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index 6fa3a66a79ca6..9394495313df4 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -909,6 +909,33 @@ public void splitAsync(ResizeRequest resizeRequest, RequestOptions options, Acti ResizeResponse::fromXContent, listener, emptySet()); } + /** + * Clones an index using the Clone Index API. + * See + * Clone Index API on elastic.co + * @param resizeRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public ResizeResponse clone(ResizeRequest resizeRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(resizeRequest, IndicesRequestConverters::clone, options, + ResizeResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously clones an index using the Clone Index API. + * See + * Clone Index API on elastic.co + * @param resizeRequest the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void cloneAsync(ResizeRequest resizeRequest, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, IndicesRequestConverters::clone, options, + ResizeResponse::fromXContent, listener, emptySet()); + } + /** * Rolls over an index using the Rollover Index API. * See diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java index c0b2f565e534f..62f041d6c6801 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java @@ -337,6 +337,13 @@ static Request shrink(ResizeRequest resizeRequest) throws IOException { return resize(resizeRequest); } + static Request clone(ResizeRequest resizeRequest) throws IOException { + if (resizeRequest.getResizeType() != ResizeType.CLONE) { + throw new IllegalArgumentException("Wrong resize type [" + resizeRequest.getResizeType() + "] for indices clone request"); + } + return resize(resizeRequest); + } + private static Request resize(ResizeRequest resizeRequest) throws IOException { String endpoint = new RequestConverters.EndpointBuilder().addPathPart(resizeRequest.getSourceIndex()) .addPathPartAsIs("_" + resizeRequest.getResizeType().name().toLowerCase(Locale.ROOT)) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index a3c40d6a9d7c3..17b3121cd0b62 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -1128,6 +1128,30 @@ public void testSplit() throws IOException { assertNotNull(aliasData); } + @SuppressWarnings("unchecked") + public void testClone() throws IOException { + createIndex("source", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0) + .put("index.number_of_routing_shards", 4).build()); + updateIndexSettings("source", Settings.builder().put("index.blocks.write", true)); + + ResizeRequest resizeRequest = new ResizeRequest("target", "source"); + resizeRequest.setResizeType(ResizeType.CLONE); + Settings targetSettings = Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build(); + resizeRequest.setTargetIndex(new org.elasticsearch.action.admin.indices.create.CreateIndexRequest("target") + .settings(targetSettings) + .alias(new Alias("alias"))); + ResizeResponse resizeResponse = execute(resizeRequest, highLevelClient().indices()::clone, highLevelClient().indices()::cloneAsync); + assertTrue(resizeResponse.isAcknowledged()); + assertTrue(resizeResponse.isShardsAcknowledged()); + Map getIndexResponse = getAsMap("target"); + Map indexSettings = (Map)XContentMapValues.extractValue("target.settings.index", getIndexResponse); + assertNotNull(indexSettings); + assertEquals("2", indexSettings.get("number_of_shards")); + assertEquals("0", indexSettings.get("number_of_replicas")); + Map aliasData = (Map)XContentMapValues.extractValue("target.aliases.alias", getIndexResponse); + assertNotNull(aliasData); + } + public void 
testRollover() throws IOException { highLevelClient().indices().create(new CreateIndexRequest("test").alias(new Alias("alias")), RequestOptions.DEFAULT); RolloverRequest rolloverRequest = new RolloverRequest("alias", "test_new"); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java index a8728b90023f9..ee3ec3b50cc46 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java @@ -830,18 +830,33 @@ public void testSplit() throws IOException { public void testSplitWrongResizeType() { ResizeRequest resizeRequest = new ResizeRequest("target", "source"); - resizeRequest.setResizeType(ResizeType.SHRINK); + ResizeType wrongType = randomFrom(ResizeType.SHRINK, ResizeType.CLONE); + resizeRequest.setResizeType(wrongType); IllegalArgumentException iae = LuceneTestCase.expectThrows(IllegalArgumentException.class, () -> IndicesRequestConverters.split(resizeRequest)); - Assert.assertEquals("Wrong resize type [SHRINK] for indices split request", iae.getMessage()); + Assert.assertEquals("Wrong resize type [" + wrongType.name() + "] for indices split request", iae.getMessage()); + } + + public void testClone() throws IOException { + resizeTest(ResizeType.CLONE, IndicesRequestConverters::clone); + } + + public void testCloneWrongResizeType() { + ResizeRequest resizeRequest = new ResizeRequest("target", "source"); + ResizeType wrongType = randomFrom(ResizeType.SHRINK, ResizeType.SPLIT); + resizeRequest.setResizeType(wrongType); + IllegalArgumentException iae = LuceneTestCase.expectThrows(IllegalArgumentException.class, () + -> IndicesRequestConverters.clone(resizeRequest)); + Assert.assertEquals("Wrong resize type [" + wrongType.name() + "] for indices clone request", iae.getMessage()); } public void testShrinkWrongResizeType() { ResizeRequest resizeRequest = new ResizeRequest("target", "source"); - resizeRequest.setResizeType(ResizeType.SPLIT); + ResizeType wrongType = randomFrom(ResizeType.SPLIT, ResizeType.CLONE); + resizeRequest.setResizeType(wrongType); IllegalArgumentException iae = LuceneTestCase.expectThrows(IllegalArgumentException.class, () -> IndicesRequestConverters.shrink(resizeRequest)); - Assert.assertEquals("Wrong resize type [SPLIT] for indices shrink request", iae.getMessage()); + Assert.assertEquals("Wrong resize type [" + wrongType.name() + "] for indices shrink request", iae.getMessage()); } public void testShrink() throws IOException { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index ddd9241a49316..6a01400e006a1 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -1808,6 +1808,75 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } + public void testCloneIndex() throws Exception { + RestHighLevelClient client = highLevelClient(); + + { + createIndex("source_index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + 
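// clone, like shrink and split, requires a read-only source index, hence the index.blocks.write setting applied next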
updateIndexSettings("source_index", Settings.builder().put("index.blocks.write", true)); + } + + // tag::clone-index-request + ResizeRequest request = new ResizeRequest("target_index","source_index"); // <1> + request.setResizeType(ResizeType.CLONE); // <2> + // end::clone-index-request + + // tag::clone-index-request-timeout + request.timeout(TimeValue.timeValueMinutes(2)); // <1> + request.timeout("2m"); // <2> + // end::clone-index-request-timeout + // tag::clone-index-request-masterTimeout + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.masterNodeTimeout("1m"); // <2> + // end::clone-index-request-masterTimeout + // tag::clone-index-request-waitForActiveShards + request.setWaitForActiveShards(2); // <1> + request.setWaitForActiveShards(ActiveShardCount.DEFAULT); // <2> + // end::clone-index-request-waitForActiveShards + // tag::clone-index-request-settings + request.getTargetIndexRequest().settings(Settings.builder() + .put("index.number_of_shards", 2)); // <1> + // end::clone-index-request-settings + // tag::clone-index-request-aliases + request.getTargetIndexRequest().alias(new Alias("target_alias")); // <1> + // end::clone-index-request-aliases + + // tag::clone-index-execute + ResizeResponse resizeResponse = client.indices().clone(request, RequestOptions.DEFAULT); + // end::clone-index-execute + + // tag::clone-index-response + boolean acknowledged = resizeResponse.isAcknowledged(); // <1> + boolean shardsAcked = resizeResponse.isShardsAcknowledged(); // <2> + // end::clone-index-response + assertTrue(acknowledged); + assertTrue(shardsAcked); + + // tag::clone-index-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(ResizeResponse resizeResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::clone-index-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::clone-index-execute-async + client.indices().cloneAsync(request, RequestOptions.DEFAULT,listener); // <1> + // end::clone-index-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + public void testRolloverIndex() throws Exception { RestHighLevelClient client = highLevelClient(); diff --git a/docs/java-rest/high-level/indices/clone_index.asciidoc b/docs/java-rest/high-level/indices/clone_index.asciidoc new file mode 100644 index 0000000000000..7448b8a402bb0 --- /dev/null +++ b/docs/java-rest/high-level/indices/clone_index.asciidoc @@ -0,0 +1,80 @@ +-- +:api: clone-index +:request: ResizeRequest +:response: ResizeResponse +-- + +[id="{upid}-{api}"] +=== Clone Index API + +[id="{upid}-{api}-request"] +==== Resize Request + +The Clone Index API requires a +{request}+ instance. 
+A +{request}+ requires two string arguments: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> The target index (first argument) to clone the source index (second argument) into +<2> The resize type needs to be set to `CLONE` + +==== Optional arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request-timeout] +-------------------------------------------------- +<1> Timeout to wait for all the nodes to acknowledge the index is opened +as a `TimeValue` +<2> Timeout to wait for all the nodes to acknowledge the index is opened +as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request-masterTimeout] +-------------------------------------------------- +<1> Timeout to connect to the master node as a `TimeValue` +<2> Timeout to connect to the master node as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request-waitForActiveShards] +-------------------------------------------------- +<1> The number of active shard copies to wait for before the clone index API +returns a response, as an `int` +<2> The number of active shard copies to wait for before the clone index API +returns a response, as an `ActiveShardCount` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request-settings] +-------------------------------------------------- +<1> The settings to apply to the target index, which optionally include the +number of shards to create for it + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request-aliases] +-------------------------------------------------- +<1> The aliases to associate the target index with + +include::../execution.asciidoc[] + +[id="{upid}-{api}-response"] +==== Clone Index Response + +The returned +{response}+ allows retrieving information about the +executed operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> Indicates whether all of the nodes have acknowledged the request +<2> Indicates whether the requisite number of shard copies were started for +each shard in the index before timing out + + diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 0aecc5f0021fc..d07f43abac5f1 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -99,6 +99,7 @@ Index Management:: * <<{upid}-close-index>> * <<{upid}-shrink-index>> * <<{upid}-split-index>> +* <<{upid}-clone-index>> * <<{upid}-refresh>> * <<{upid}-flush>> * <<{upid}-flush-synced>> @@ -133,6 +134,7 @@ include::indices/open_index.asciidoc[] include::indices/close_index.asciidoc[] include::indices/shrink_index.asciidoc[] include::indices/split_index.asciidoc[]
+include::indices/clone_index.asciidoc[] include::indices/refresh.asciidoc[] include::indices/flush.asciidoc[] include::indices/flush_synced.asciidoc[] diff --git a/docs/reference/indices.asciidoc b/docs/reference/indices.asciidoc index 2a4cff93ba759..b3c9166437073 100644 --- a/docs/reference/indices.asciidoc +++ b/docs/reference/indices.asciidoc @@ -15,6 +15,7 @@ index settings, aliases, mappings, and index templates. * <> * <> * <> +* <> * <> * <> * <> @@ -72,6 +73,8 @@ include::indices/shrink-index.asciidoc[] include::indices/split-index.asciidoc[] +include::indices/clone-index.asciidoc[] + include::indices/rollover-index.asciidoc[] include::indices/apis/freeze.asciidoc[] diff --git a/docs/reference/indices/clone-index.asciidoc b/docs/reference/indices/clone-index.asciidoc new file mode 100644 index 0000000000000..a57e0e30593ab --- /dev/null +++ b/docs/reference/indices/clone-index.asciidoc @@ -0,0 +1,138 @@ +[[indices-clone-index]] +== Clone Index + +The clone index API allows you to clone an existing index into a new index, +where each original primary shard is cloned into a new primary shard in +the new index. + +[float] +=== How does cloning work? + +Cloning works as follows: + +* First, it creates a new target index with the same definition as the source + index. + +* Then it hard-links segments from the source index into the target index. (If + the file system doesn't support hard-linking, then all segments are copied + into the new index, which is a much more time-consuming process.) + +* Finally, it recovers the target index as though it were a closed index which + had just been re-opened. + +[float] +=== Preparing an index for cloning + +Create a new index: + +[source,js] +-------------------------------------------------- +PUT my_source_index +{ + "settings": { + "index.number_of_shards" : 5 + } +} +-------------------------------------------------- +// CONSOLE + +In order to clone an index, the index must be marked as read-only, +and have <> `green`. + +This can be achieved with the following request: + +[source,js] +-------------------------------------------------- +PUT /my_source_index/_settings +{ + "settings": { + "index.blocks.write": true <1> + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +<1> Prevents write operations to this index while still allowing metadata + changes like deleting the index. + +[float] +=== Cloning an index + +To clone `my_source_index` into a new index called `my_target_index`, issue +the following request: + +[source,js] +-------------------------------------------------- +POST my_source_index/_clone/my_target_index +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +The above request returns immediately once the target index has been added to +the cluster state -- it doesn't wait for the clone operation to start. + +[IMPORTANT] +===================================== + +Indices can only be cloned if they satisfy the following requirements: + +* The target index must not exist. + +* The source index must have the same number of primary shards as the target index. + +* The node handling the clone process must have sufficient free disk space to + accommodate a second copy of the existing index.
+ +===================================== + +The `_clone` API is similar to the <> +and accepts `settings` and `aliases` parameters for the target index: + +[source,js] +-------------------------------------------------- +POST my_source_index/_clone/my_target_index +{ + "settings": { + "index.number_of_shards": 5 <1> + }, + "aliases": { + "my_search_indices": {} + } +} +-------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT my_source_index\n{"settings": {"index.blocks.write": true, "index.number_of_shards": "5"}}\n/] + +<1> The number of shards in the target index. This must be equal to the + number of shards in the source index. + + +NOTE: Mappings may not be specified in the `_clone` request. The mappings of +the source index will be used for the target index. + +[float] +=== Monitoring the clone process + +The clone process can be monitored with the <>, or the <> can be used to wait +until all primary shards have been allocated by setting the `wait_for_status` +parameter to `yellow`. + +The `_clone` API returns as soon as the target index has been added to the +cluster state, before any shards have been allocated. At this point, all +shards are in the state `unassigned`. If, for any reason, the target index +can't be allocated, its primary shard will remain `unassigned` until it +can be allocated on that node. + +Once the primary shard is allocated, it moves to state `initializing`, and the +clone process begins. When the clone operation completes, the shard will +become `active`. At that point, Elasticsearch will try to allocate any +replicas and may decide to relocate the primary shard to another node. + +[float] +=== Wait For Active Shards + +Because the clone operation creates a new index to clone the shards to, +the <> setting +on index creation applies to the clone index action as well. diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clone.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clone.json new file mode 100644 index 0000000000000..fe847488c2c33 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clone.json @@ -0,0 +1,39 @@ +{ + "indices.clone": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clone-index.html", + "stability": "stable", + "methods": ["PUT", "POST"], + "url": { + "paths": ["/{index}/_clone/{target}"], + "parts": { + "index": { + "type" : "string", + "required" : true, + "description" : "The name of the source index to clone" + }, + "target": { + "type" : "string", + "required" : true, + "description" : "The name of the target index to clone into" + } + }, + "params": { + "timeout": { + "type" : "time", + "description" : "Explicit operation timeout" + }, + "master_timeout": { + "type" : "time", + "description" : "Specify timeout for connection to master" + }, + "wait_for_active_shards": { + "type" : "string", + "description" : "Set the number of active shards to wait for on the cloned index before the operation returns." 
+ } + } + }, + "body": { + "description" : "The configuration for the target index (`settings` and `aliases`)" + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml new file mode 100644 index 0000000000000..2d87acf63154c --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml @@ -0,0 +1,111 @@ +--- +setup: + - do: + indices.create: + index: source + wait_for_active_shards: 1 + body: + settings: + index.number_of_shards: 2 + index.number_of_replicas: 0 + - do: + index: + index: source + id: "1" + body: { "foo": "hello world" } + + - do: + index: + index: source + id: "2" + body: { "foo": "hello world 2" } + + - do: + index: + index: source + id: "3" + body: { "foo": "hello world 3" } + +--- +"Clone index via API": + - skip: + version: " - 7.99.99" + reason: index cloning was added in 8.0.0 + # make it read-only + - do: + indices.put_settings: + index: source + body: + index.blocks.write: true + index.number_of_replicas: 0 + + - do: + cluster.health: + wait_for_status: green + index: source + + # now we do the actual clone + - do: + indices.clone: + index: "source" + target: "target" + wait_for_active_shards: 1 + master_timeout: 10s + body: + settings: + index.number_of_replicas: 0 + index.number_of_shards: 2 + + - do: + cluster.health: + wait_for_status: green + + - do: + get: + index: target + id: "1" + + - match: { _index: target } + - match: { _type: _doc } + - match: { _id: "1" } + - match: { _source: { foo: "hello world" } } + + + - do: + get: + index: target + id: "2" + + - match: { _index: target } + - match: { _type: _doc } + - match: { _id: "2" } + - match: { _source: { foo: "hello world 2" } } + + + - do: + get: + index: target + id: "3" + + - match: { _index: target } + - match: { _type: _doc } + - match: { _id: "3" } + - match: { _source: { foo: "hello world 3" } } + +--- +"Create illegal clone indices": + - skip: + version: " - 7.99.99" + reason: index cloning was added in 8.0.0 + # try to do an illegal clone with illegal number_of_shards + - do: + catch: /illegal_argument_exception/ + indices.clone: + index: "source" + target: "target" + wait_for_active_shards: 1 + master_timeout: 10s + body: + settings: + index.number_of_replicas: 0 + index.number_of_shards: 6 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/20_source_mapping.yml new file mode 100644 index 0000000000000..a8a4a71de86b1 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/20_source_mapping.yml @@ -0,0 +1,65 @@ +--- +"Clone index ignores target template mapping": + - skip: + version: " - 7.99.99" + reason: index cloning was added in 8.0.0 + # create index + - do: + indices.create: + index: source + wait_for_active_shards: 1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + count: + type: text + + # index document + - do: + index: + index: source + id: "1" + body: { "count": "1" } + + # create template matching shrink target + - do: + indices.put_template: + name: tpl1 + body: + index_patterns: targ* + mappings: + properties: + count: + type: integer + + # make it read-only + - do: + indices.put_settings: + index: source + body: + index.blocks.write: true + index.number_of_replicas: 0 + + - do: + cluster.health: + wait_for_status: green + index: 
source + + # now we do the actual clone + - do: + indices.clone: + index: "source" + target: "target" + wait_for_active_shards: 1 + master_timeout: 10s + body: + settings: + index.number_of_shards: 1 + index.number_of_replicas: 0 + + - do: + cluster.health: + wait_for_status: green diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/30_copy_settings.yml new file mode 100644 index 0000000000000..426e67385b331 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/30_copy_settings.yml @@ -0,0 +1,61 @@ +--- +"Copy settings during clone index": + - skip: + version: " - 7.99.99" + reason: index cloning was added in 8.0.0 + features: [arbitrary_key] + + - do: + nodes.info: + node_id: data:true + - set: + nodes._arbitrary_key_: node_id + + - do: + indices.create: + index: source + wait_for_active_shards: 1 + body: + settings: + index.number_of_replicas: 0 + index.number_of_shards: 1 + index.merge.scheduler.max_merge_count: 4 + + # make it read-only + - do: + indices.put_settings: + index: source + body: + index.blocks.write: true + index.number_of_replicas: 0 + + - do: + cluster.health: + wait_for_status: green + index: source + + # now we do an actual clone and copy settings + - do: + indices.clone: + index: "source" + target: "copy-settings-target" + wait_for_active_shards: 1 + master_timeout: 10s + body: + settings: + index.number_of_replicas: 0 + index.number_of_shards: 1 + index.merge.scheduler.max_thread_count: 2 + + - do: + cluster.health: + wait_for_status: green + + - do: + indices.get_settings: + index: "copy-settings-target" + + # settings should be copied + - match: { copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" } + - match: { copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" } + - match: { copy-settings-target.settings.index.blocks.write: "true" } diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 9c2ddf2b1d0d5..2b295af52709d 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -598,6 +598,7 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestCreateIndexAction(settings, restController)); registerHandler.accept(new RestResizeHandler.RestShrinkIndexAction(settings, restController)); registerHandler.accept(new RestResizeHandler.RestSplitIndexAction(settings, restController)); + registerHandler.accept(new RestResizeHandler.RestCloneIndexAction(settings, restController)); registerHandler.accept(new RestRolloverIndexAction(settings, restController)); registerHandler.accept(new RestDeleteIndexAction(settings, restController)); registerHandler.accept(new RestCloseIndexAction(settings, restController)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java index a42ed270f84f6..1732d1c1df6e0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.action.admin.indices.shrink; +import org.elasticsearch.Version; import 
org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.admin.indices.alias.Alias; @@ -100,6 +101,9 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); targetIndexRequest.writeTo(out); out.writeString(sourceIndex); + if (type == ResizeType.CLONE && out.getVersion().before(Version.V_8_0_0)) { + throw new IllegalArgumentException("can't send clone request to a node that's older than " + Version.V_8_0_0); + } out.writeEnum(type); out.writeOptionalBoolean(copySettings); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeType.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeType.java index bca386a9567d6..ccb1c37a02191 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeType.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeType.java @@ -23,5 +23,5 @@ * The type of the resize operation */ public enum ResizeType { - SHRINK, SPLIT; + SHRINK, SPLIT, CLONE; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java index a64b6e65d72d1..f13c1096d5548 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java @@ -133,8 +133,13 @@ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final Resi if (IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings)) { numShards = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings); } else { - assert resizeRequest.getResizeType() == ResizeType.SHRINK : "split must specify the number of shards explicitly"; - numShards = 1; + assert resizeRequest.getResizeType() != ResizeType.SPLIT : "split must specify the number of shards explicitly"; + if (resizeRequest.getResizeType() == ResizeType.SHRINK) { + numShards = 1; + } else { + assert resizeRequest.getResizeType() == ResizeType.CLONE; + numShards = metaData.getNumberOfShards(); + } } for (int i = 0; i < numShards; i++) { @@ -151,15 +156,17 @@ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final Resi + "] docs - too many documents in shards " + shardIds); } } - } else { + } else if (resizeRequest.getResizeType() == ResizeType.SPLIT) { Objects.requireNonNull(IndexMetaData.selectSplitShard(i, metaData, numShards)); // we just execute this to ensure we get the right exceptions if the number of shards is wrong or less then etc. + } else { + Objects.requireNonNull(IndexMetaData.selectCloneShard(i, metaData, numShards)); + // we just execute this to ensure we get the right exceptions if the number of shards is wrong etc. 
} } if (IndexMetaData.INDEX_ROUTING_PARTITION_SIZE_SETTING.exists(targetIndexSettings)) { throw new IllegalArgumentException("cannot provide a routing partition size value when resizing an index"); - } if (IndexMetaData.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.exists(targetIndexSettings)) { // if we have a source index with 1 shards it's legal to set this diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 3598753f80d9c..6bd98926865e9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -1500,6 +1500,22 @@ public static ShardId selectSplitShard(int shardId, IndexMetaData sourceIndexMet return new ShardId(sourceIndexMetadata.getIndex(), shardId/routingFactor); } + /** + * Returns the source shard ID to clone the given target shard from + * @param shardId the id of the target shard to clone into + * @param sourceIndexMetadata the source index metadata + * @param numTargetShards the total number of shards in the target index + * @return the source shard ID to clone from + */ + public static ShardId selectCloneShard(int shardId, IndexMetaData sourceIndexMetadata, int numTargetShards) { + int numSourceShards = sourceIndexMetadata.getNumberOfShards(); + if (numSourceShards != numTargetShards) { + throw new IllegalArgumentException("the number of target shards (" + numTargetShards + ") must be the same as the number of " + + "source shards (" + numSourceShards + ")"); + } + return new ShardId(sourceIndexMetadata.getIndex(), shardId); + } + private static void assertSplitMetadata(int numSourceShards, int numTargetShards, IndexMetaData sourceIndexMetadata) { if (numSourceShards > numTargetShards) { throw new IllegalArgumentException("the number of source shards [" + numSourceShards @@ -1530,8 +1546,9 @@ public static Set selectRecoverFromShards(int shardId, IndexMetaData so return selectShrinkShards(shardId, sourceIndexMetadata, numTargetShards); } else if (sourceIndexMetadata.getNumberOfShards() < numTargetShards) { return Collections.singleton(selectSplitShard(shardId, sourceIndexMetadata, numTargetShards)); + } else { + return Collections.singleton(selectCloneShard(shardId, sourceIndexMetadata, numTargetShards)); } - throw new IllegalArgumentException("can't select recover from shards if both indices have the same number of shards"); } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 863871314060e..fc97c80a65ed5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -705,9 +705,16 @@ static void validateSplitIndex(ClusterState state, String sourceIndex, IndexMetaData.selectSplitShard(0, sourceMetaData, IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings)); } - private static IndexMetaData validateResize(ClusterState state, String sourceIndex, - Set targetIndexMappingsTypes, String targetIndexName, - Settings targetIndexSettings) { + static void validateCloneIndex(ClusterState state, String sourceIndex, + Set targetIndexMappingsTypes, String targetIndexName, + Settings targetIndexSettings) { + IndexMetaData sourceMetaData = validateResize(state,
sourceIndex, targetIndexMappingsTypes, targetIndexName, targetIndexSettings); + IndexMetaData.selectCloneShard(0, sourceMetaData, IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings)); + } + + static IndexMetaData validateResize(ClusterState state, String sourceIndex, + Set targetIndexMappingsTypes, String targetIndexName, + Settings targetIndexSettings) { if (state.metaData().hasIndex(targetIndexName)) { throw new ResourceAlreadyExistsException(state.metaData().index(targetIndexName).getIndex()); } @@ -760,6 +767,9 @@ static void prepareResizeIndexSettings( } else if (type == ResizeType.SPLIT) { validateSplitIndex(currentState, resizeSourceIndex.getName(), mappingKeys, resizeIntoName, indexSettingsBuilder.build()); indexSettingsBuilder.putNull(initialRecoveryIdFilter); + } else if (type == ResizeType.CLONE) { + validateCloneIndex(currentState, resizeSourceIndex.getName(), mappingKeys, resizeIntoName, indexSettingsBuilder.build()); + indexSettingsBuilder.putNull(initialRecoveryIdFilter); } else { throw new IllegalStateException("unknown resize type is " + type); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ResizeAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ResizeAllocationDecider.java index ca1c52addac3f..5dc51e3143463 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ResizeAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ResizeAllocationDecider.java @@ -53,11 +53,13 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing } IndexMetaData sourceIndexMetaData = allocation.metaData().getIndexSafe(resizeSourceIndex); if (indexMetaData.getNumberOfShards() < sourceIndexMetaData.getNumberOfShards()) { - // this only handles splits so far. + // this only handles splits and clones so far. return Decision.ALWAYS; } - ShardId shardId = IndexMetaData.selectSplitShard(shardRouting.id(), sourceIndexMetaData, indexMetaData.getNumberOfShards()); + ShardId shardId = indexMetaData.getNumberOfShards() == sourceIndexMetaData.getNumberOfShards() ?
+ IndexMetaData.selectCloneShard(shardRouting.id(), sourceIndexMetaData, indexMetaData.getNumberOfShards()) : + IndexMetaData.selectSplitShard(shardRouting.id(), sourceIndexMetaData, indexMetaData.getNumberOfShards()); ShardRouting sourceShardRouting = allocation.routingNodes().activePrimary(shardId); if (sourceShardRouting == null) { return allocation.decision(Decision.NO, NAME, "source primary shard [%s] is not active", shardId); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java index 3d0158cf95f0f..4575d88496f29 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java @@ -93,4 +93,24 @@ protected ResizeType getResizeType() { } + public static class RestCloneIndexAction extends RestResizeHandler { + + public RestCloneIndexAction(final Settings settings, final RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.PUT, "/{index}/_clone/{target}", this); + controller.registerHandler(RestRequest.Method.POST, "/{index}/_clone/{target}", this); + } + + @Override + public String getName() { + return "clone_index_action"; + } + + @Override + protected ResizeType getResizeType() { + return ResizeType.CLONE; + } + + } + } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java new file mode 100644 index 0000000000000..73d6bede61650 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java @@ -0,0 +1,126 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.action.admin.indices.create; + +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.elasticsearch.action.admin.indices.shrink.ResizeType; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.TermsQueryBuilder; +import org.elasticsearch.index.seqno.SeqNoStats; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.VersionUtils; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.equalTo; + +public class CloneIndexIT extends ESIntegTestCase { + + @Override + protected boolean forbidPrivateIndexSettings() { + return false; + } + + public void testCreateCloneIndex() { + Version version = VersionUtils.randomIndexCompatibleVersion(random()); + int numPrimaryShards = randomIntBetween(1, 5); + prepareCreate("source").setSettings(Settings.builder().put(indexSettings()) + .put("number_of_shards", numPrimaryShards) + .put("index.version.created", version) + ).get(); + final int docs = randomIntBetween(0, 128); + for (int i = 0; i < docs; i++) { + client().prepareIndex("source", "type") + .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + } + internalCluster().ensureAtLeastNumDataNodes(2); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + // relocate all shards to one node such that we can merge it. + client().admin().indices().prepareUpdateSettings("source") + .setSettings(Settings.builder() + .put("index.blocks.write", true)).get(); + ensureGreen(); + + final IndicesStatsResponse sourceStats = client().admin().indices().prepareStats("source").setSegments(true).get(); + + // disable rebalancing to be able to capture the right stats. balancing can move the target primary + // making it hard to pin point the source shards. + client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put( + EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none" + )).get(); + try { + + final boolean createWithReplicas = randomBoolean(); + assertAcked(client().admin().indices().prepareResizeIndex("source", "target") + .setResizeType(ResizeType.CLONE) + .setSettings(Settings.builder() + .put("index.number_of_replicas", createWithReplicas ? 
1 : 0) + .putNull("index.blocks.write") + .build()).get()); + ensureGreen(); + + final IndicesStatsResponse targetStats = client().admin().indices().prepareStats("target").get(); + assertThat(targetStats.getIndex("target").getIndexShards().keySet().size(), equalTo(numPrimaryShards)); + + for (int i = 0; i < numPrimaryShards; i++) { + final SeqNoStats sourceSeqNoStats = sourceStats.getIndex("source").getIndexShards().get(i).getAt(0).getSeqNoStats(); + final SeqNoStats targetSeqNoStats = targetStats.getIndex("target").getIndexShards().get(i).getAt(0).getSeqNoStats(); + assertEquals(sourceSeqNoStats.getMaxSeqNo(), targetSeqNoStats.getMaxSeqNo()); + assertEquals(targetSeqNoStats.getMaxSeqNo(), targetSeqNoStats.getLocalCheckpoint()); + } + + final int size = docs > 0 ? 2 * docs : 1; + assertHitCount(client().prepareSearch("target").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + + if (createWithReplicas == false) { + // bump replicas + client().admin().indices().prepareUpdateSettings("target") + .setSettings(Settings.builder() + .put("index.number_of_replicas", 1)).get(); + ensureGreen(); + assertHitCount(client().prepareSearch("target").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + } + + for (int i = docs; i < 2 * docs; i++) { + client().prepareIndex("target", "type") + .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + } + flushAndRefresh(); + assertHitCount(client().prepareSearch("target").setSize(2 * size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), + 2 * docs); + assertHitCount(client().prepareSearch("source").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + GetSettingsResponse target = client().admin().indices().prepareGetSettings("target").get(); + assertEquals(version, target.getIndexToSettings().get("target").getAsVersion("index.version.created", null)); + } finally { + // clean up + client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put( + EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String)null + )).get(); + } + + } + +} diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java index e3b6234daa7ca..54a97871ec696 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java @@ -203,8 +203,7 @@ public void testSelectResizeShards() { assertEquals(IndexMetaData.selectShrinkShards(shard, shrink, numTargetShards), IndexMetaData.selectRecoverFromShards(shard, shrink, numTargetShards)); - assertEquals("can't select recover from shards if both indices have the same number of shards", - expectThrows(IllegalArgumentException.class, () -> IndexMetaData.selectRecoverFromShards(0, shrink, 32)).getMessage()); + IndexMetaData.selectRecoverFromShards(0, shrink, 32); } public void testSelectSplitShard() { From aefb72040c3e432d3913db461785c7cc5535caaf Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Thu, 25 Jul 2019 11:20:42 -0700 Subject: [PATCH 27/51] [DOCS] Updates terms in machine learning calendar APIs (#44866) --- .../high-level/ml/delete-calendar-event.asciidoc | 6 +++--- .../high-level/ml/delete-calendar-job.asciidoc | 10 +++++----- .../java-rest/high-level/ml/delete-calendar.asciidoc | 8 ++++---- .../high-level/ml/put-calendar-job.asciidoc | 12 ++++++------ 
docs/java-rest/high-level/ml/put-calendar.asciidoc | 12 ++++++------ .../apis/delete-calendar-job.asciidoc | 8 ++++---- .../anomaly-detection/apis/put-calendar-job.asciidoc | 8 ++++---- 7 files changed, 32 insertions(+), 32 deletions(-) diff --git a/docs/java-rest/high-level/ml/delete-calendar-event.asciidoc b/docs/java-rest/high-level/ml/delete-calendar-event.asciidoc index dcd09a0581ddf..ffad85dd45bb4 100644 --- a/docs/java-rest/high-level/ml/delete-calendar-event.asciidoc +++ b/docs/java-rest/high-level/ml/delete-calendar-event.asciidoc @@ -4,13 +4,13 @@ :response: AcknowledgedResponse -- [id="{upid}-{api}"] -=== Delete Calendar Event API +=== Delete calendar event API Removes a scheduled event from an existing {ml} calendar. The API accepts a +{request}+ and responds with a +{response}+ object. [id="{upid}-{api}-request"] -==== Delete Calendar Event Request +==== Delete calendar event request A +{request}+ is constructed referencing a non-null calendar ID, and eventId which to remove from the calendar @@ -23,7 +23,7 @@ include-tagged::{doc-tests-file}[{api}-request] <2> The eventId to remove from the calendar [id="{upid}-{api}-response"] -==== Delete Calendar Event Response +==== Delete calendar event response The returned +{response}+ acknowledges the success of the request: diff --git a/docs/java-rest/high-level/ml/delete-calendar-job.asciidoc b/docs/java-rest/high-level/ml/delete-calendar-job.asciidoc index 4e55a221b85ab..5e4463b97e594 100644 --- a/docs/java-rest/high-level/ml/delete-calendar-job.asciidoc +++ b/docs/java-rest/high-level/ml/delete-calendar-job.asciidoc @@ -4,13 +4,13 @@ :response: PutCalendarResponse -- [id="{upid}-{api}"] -=== Delete Calendar Job API -Removes {ml} jobs from an existing {ml} calendar. +=== Delete {anomaly-jobs} from calendar API +Removes {anomaly-jobs} from an existing {ml} calendar. The API accepts a +{request}+ and responds with a +{response}+ object. [id="{upid}-{api}-request"] -==== Delete Calendar Job Request +==== Delete {anomaly-jobs} from calendar request A +{request}+ is constructed referencing a non-null calendar ID, and JobIDs which to remove from the calendar @@ -23,7 +23,7 @@ include-tagged::{doc-tests-file}[{api}-request] <2> The JobIds to remove from the calendar [id="{upid}-{api}-response"] -==== Delete Calendar Job Response +==== Delete {anomaly-jobs} from calendar response The returned +{response}+ contains the updated Calendar: @@ -31,6 +31,6 @@ The returned +{response}+ contains the updated Calendar: -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-response] -------------------------------------------------- -<1> The updated Calendar with the jobs removed +<1> The updated calendar with the jobs removed include::../execution.asciidoc[] diff --git a/docs/java-rest/high-level/ml/delete-calendar.asciidoc b/docs/java-rest/high-level/ml/delete-calendar.asciidoc index e7d5318a465d0..e39a1997d525d 100644 --- a/docs/java-rest/high-level/ml/delete-calendar.asciidoc +++ b/docs/java-rest/high-level/ml/delete-calendar.asciidoc @@ -4,13 +4,13 @@ :response: AcknowledgedResponse -- [id="{upid}-{api}"] -=== Delete Calendar API +=== Delete calendar API Delete a {ml} calendar. The API accepts a +{request}+ and responds with a +{response}+ object. [id="{upid}-{api}-request"] -==== Delete Calendar Request +==== Delete calendar request A `DeleteCalendar` object requires a non-null `calendarId`. @@ -18,10 +18,10 @@ A `DeleteCalendar` object requires a non-null `calendarId`. 
---------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
---------------------------------------------------
-<1> Constructing a new request referencing an existing Calendar
+<1> Constructing a new request referencing an existing calendar

 [id="{upid}-{api}-response"]
-==== Delete Calendar Response
+==== Delete calendar response

 The returned +{response}+ object indicates the acknowledgement of the request:
 ["source","java",subs="attributes,callouts,macros"]
diff --git a/docs/java-rest/high-level/ml/put-calendar-job.asciidoc b/docs/java-rest/high-level/ml/put-calendar-job.asciidoc
index 17fc3a93063ca..f178fa82c8043 100644
--- a/docs/java-rest/high-level/ml/put-calendar-job.asciidoc
+++ b/docs/java-rest/high-level/ml/put-calendar-job.asciidoc
@@ -4,13 +4,13 @@
 :response: PutCalendarResponse
 --
 [id="{upid}-{api}"]
-=== Put Calendar Job API
-Adds {ml} jobs to an existing {ml} calendar.
+=== Put {anomaly-jobs} in calendar API
+Adds {anomaly-jobs} to an existing {ml} calendar.

 The API accepts a +{request}+ and responds with a +{response}+ object.

 [id="{upid}-{api}-request"]
-==== Put Calendar Job Request
+==== Put {anomaly-jobs} in calendar request

 A +{request}+ is constructed referencing a non-null
 calendar ID, and JobIDs to which to add to the calendar
@@ -23,14 +23,14 @@ include-tagged::{doc-tests-file}[{api}-request]
 <2> The JobIds to add to the calendar

 [id="{upid}-{api}-response"]
-==== Put Calendar Response
+==== Put {anomaly-jobs} in calendar response

-The returned +{response}+ contains the updated Calendar:
+The returned +{response}+ contains the updated calendar:

 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-response]
 --------------------------------------------------
-<1> The updated Calendar
+<1> The updated calendar

 include::../execution.asciidoc[]
diff --git a/docs/java-rest/high-level/ml/put-calendar.asciidoc b/docs/java-rest/high-level/ml/put-calendar.asciidoc
index defd72e35a056..be45f573bdb85 100644
--- a/docs/java-rest/high-level/ml/put-calendar.asciidoc
+++ b/docs/java-rest/high-level/ml/put-calendar.asciidoc
@@ -4,32 +4,32 @@
 :response: PutCalendarResponse
 --
 [id="{upid}-{api}"]
-=== Put Calendar API
+=== Put calendar API
 Creates a new {ml} calendar.
 The API accepts a +{request}+ and responds
 with a +{response}+ object.

 [id="{upid}-{api}-request"]
-==== Put Calendar Request
+==== Put calendar request

-A +{request}+ is constructed with a Calendar object
+A +{request}+ is constructed with a calendar object

 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-request]
 --------------------------------------------------
-<1> Create a request with the given Calendar
+<1> Create a request with the given calendar.

 [id="{upid}-{api}-response"]
-==== Put Calendar Response
+==== Put calendar response

-The returned +{response}+ contains the created Calendar:
+The returned +{response}+ contains the created calendar:

 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-response]
 --------------------------------------------------
-<1> The created Calendar
+<1> The created calendar.
include::../execution.asciidoc[] diff --git a/docs/reference/ml/anomaly-detection/apis/delete-calendar-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-calendar-job.asciidoc index 096918a821195..7eb18772beb52 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-calendar-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-calendar-job.asciidoc @@ -1,12 +1,12 @@ [role="xpack"] [testenv="platinum"] [[ml-delete-calendar-job]] -=== Delete jobs from calendar API +=== Delete {anomaly-jobs} from calendar API ++++ Delete jobs from calendar ++++ -Deletes jobs from a calendar. +Deletes {anomaly-jobs} from a calendar. [[ml-delete-calendar-job-request]] ==== {api-request-title} @@ -27,8 +27,8 @@ Deletes jobs from a calendar. (Required, string) Identifier for the calendar. ``:: - (Required, string) An identifier for the job. It can be a job identifier, a - group name, or a comma-separated list of jobs or groups. + (Required, string) An identifier for the {anomaly-jobs}. It can be a job + identifier, a group name, or a comma-separated list of jobs or groups. [[ml-delete-calendar-job-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/anomaly-detection/apis/put-calendar-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-calendar-job.asciidoc index d693543931013..927829e44e9cf 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-calendar-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-calendar-job.asciidoc @@ -1,12 +1,12 @@ [role="xpack"] [testenv="platinum"] [[ml-put-calendar-job]] -=== Add jobs to calendar API +=== Add {anomaly-jobs} to calendar API ++++ Add jobs to calendar ++++ -Adds a job to a calendar. +Adds an {anomaly-job} to a calendar. [[ml-put-calendar-job-request]] ==== {api-request-title} @@ -27,8 +27,8 @@ Adds a job to a calendar. (Required, string) Identifier for the calendar. ``:: - (Required, string) An identifier for the job. It can be a job identifier, a - group name, or a comma-separated list of jobs or groups. + (Required, string) An identifier for the {anomaly-jobs}. It can be a job + identifier, a group name, or a comma-separated list of jobs or groups. [[ml-put-calendar-job-example]] ==== {api-examples-title} From 280b40eaa4f9e34e5c59e362ecd3e19b7ac62a59 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Thu, 25 Jul 2019 11:32:31 -0700 Subject: [PATCH 28/51] [DOCS] Adds command reference for elasticsearch-croneval (#43946) --- docs/reference/commands/croneval.asciidoc | 52 +++++++++++++++++++ docs/reference/commands/index.asciidoc | 2 + .../en/watcher/trigger/schedule/cron.asciidoc | 26 ++++------ 3 files changed, 64 insertions(+), 16 deletions(-) create mode 100644 docs/reference/commands/croneval.asciidoc diff --git a/docs/reference/commands/croneval.asciidoc b/docs/reference/commands/croneval.asciidoc new file mode 100644 index 0000000000000..be9b16770dc33 --- /dev/null +++ b/docs/reference/commands/croneval.asciidoc @@ -0,0 +1,52 @@ +[role="xpack"] +[testenv="gold+"] +[[elasticsearch-croneval]] +== elasticsearch-croneval + +Validates and evaluates a cron expression. 
+
+[discrete]
+=== Synopsis
+
+[source,shell]
+--------------------------------------------------
+bin/elasticsearch-croneval <expression>
+[-c, --count <integer>] [-h, --help]
+([-s, --silent] | [-v, --verbose])
+--------------------------------------------------
+
+[discrete]
+=== Description
+
+This command enables you to verify that your
+https://en.wikipedia.org/wiki/Cron[cron] expressions are valid for use with the
+{es} {alert-features} and produce the expected results.
+
+This command is provided in the `$ES_HOME/bin` directory.
+
+[discrete]
+=== Parameters
+
+`-c, --count` <integer>::
+  The number of future times this expression will be triggered. The default
+  value is `10`.
+
+`-h, --help`::
+  Returns all of the command parameters.
+
+`-s, --silent`::
+  Shows minimal output.
+
+`-v, --verbose`::
+  Shows verbose output.
+
+[discrete]
+=== Examples
+
+If the cron expression is valid, the following command displays the next
+20 times that the schedule will be triggered:
+
+[source,bash]
+--------------------------------------------------
+bin/elasticsearch-croneval "0 0/1 * * * ?" -c 20
+--------------------------------------------------
diff --git a/docs/reference/commands/index.asciidoc b/docs/reference/commands/index.asciidoc
index e778366aa58b9..81e9000c9bb13 100644
--- a/docs/reference/commands/index.asciidoc
+++ b/docs/reference/commands/index.asciidoc
@@ -9,6 +9,7 @@ tasks from the command line:
 * <>
 * <>
+* <<elasticsearch-croneval>>
 * <>
 * <>
 * <>
@@ -20,6 +21,7 @@ tasks from the command line:
 include::certgen.asciidoc[]
 include::certutil.asciidoc[]
+include::croneval.asciidoc[]
 include::node-tool.asciidoc[]
 include::saml-metadata.asciidoc[]
 include::setup-passwords.asciidoc[]
diff --git a/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc
index af0f7319398f4..2e5b2feb8b351 100644
--- a/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc
+++ b/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc
@@ -211,27 +211,21 @@ minute during the weekend:
 // NOTCONSOLE

 [[croneval]]
-===== Verifying Cron Expressions
+===== Verifying cron expressions

-The {es} {alert-features} provide a `elasticsearch-croneval` command line tool
-that you can use to verify that
-your cron expressions are valid and produce the expected results. This tool is
-provided in the `$ES_HOME/bin` directory.
+The {es} {alert-features} provide an
+{ref}/elasticsearch-croneval.html[`elasticsearch-croneval`] command line tool
+that you can use to verify that your cron expressions are valid and produce the
+expected results. This tool is provided in the `$ES_HOME/bin` directory.

-To verify a cron expression, simply pass it in as a parameter to `elasticsearch-croneval`:
+To verify a cron expression, simply pass it in as a parameter to
+`elasticsearch-croneval`:

 [source,bash]
 --------------------------------------------------
 bin/elasticsearch-croneval "0 0/1 * * * ?"
 --------------------------------------------------

-If the cron expression is valid, `elasticsearch-croneval` displays the next 10 times that the
-schedule will be triggered.
-
-You can specify the `-c` option to control how many future trigger times are
-displayed. For example, the following command displays the next 20 trigger times:
-
-[source,bash]
---------------------------------------------------
-bin/elasticsearch-croneval "0 0/1 * * * ?" -c 20
---------------------------------------------------
+If the cron expression is valid, `elasticsearch-croneval` displays the next 10
+times that the schedule will be triggered.
You can specify the `-c` option to +control how many future trigger times are displayed. From 659f60f62fee00fc8f7d6267924334d142a089a3 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 25 Jul 2019 11:40:21 -0700 Subject: [PATCH 29/51] Add missing ZonedDateTime methods for joda compat layer (#44829) While joda no longer exists in the apis for 7.x, the compatibility layer still exists with helper methods mimicking the behavior of joda for ZonedDateTime objects returned for date fields in scripts. This layer was originally intended to be removed in 7.0, but is now likely to exist for the lifetime of 7.x. This commit adds missing methods from ChronoZonedDateTime to the compat class. These methods were not part of joda, but are needed to act like a real ZonedDateTime. relates #44411 --- .../painless/spi/org.elasticsearch.txt | 11 +++++ .../script/JodaCompatibleZonedDateTime.java | 42 +++++++++++++++++++ 2 files changed, 53 insertions(+) diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt index 031fc342f339b..c1da194b98fb3 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt @@ -127,6 +127,17 @@ class org.elasticsearch.script.JodaCompatibleZonedDateTime { ZonedDateTime withZoneSameLocal(ZoneId) ZonedDateTime withZoneSameInstant(ZoneId) + #### ChronoZonedDateTime + int compareTo(JodaCompatibleZonedDateTime) + Chronology getChronology() + String format(DateTimeFormatter) + int get(TemporalField) + long getLong(TemporalField) + ZoneOffset getOffset() + boolean isSupported(TemporalField) + long toEpochSecond() + LocalTime toLocalTime() + #### Joda methods that exist in java time boolean equals(Object) int hashCode() diff --git a/server/src/main/java/org/elasticsearch/script/JodaCompatibleZonedDateTime.java b/server/src/main/java/org/elasticsearch/script/JodaCompatibleZonedDateTime.java index fc3816cad8a15..017acbf4951ec 100644 --- a/server/src/main/java/org/elasticsearch/script/JodaCompatibleZonedDateTime.java +++ b/server/src/main/java/org/elasticsearch/script/JodaCompatibleZonedDateTime.java @@ -32,10 +32,14 @@ import java.time.Instant; import java.time.LocalDate; import java.time.LocalDateTime; +import java.time.LocalTime; import java.time.Month; import java.time.OffsetDateTime; import java.time.ZoneId; +import java.time.ZoneOffset; import java.time.ZonedDateTime; +import java.time.chrono.Chronology; +import java.time.format.DateTimeFormatter; import java.time.temporal.ChronoField; import java.time.temporal.TemporalAdjuster; import java.time.temporal.TemporalAmount; @@ -94,6 +98,42 @@ public String toString() { return DATE_FORMATTER.format(dt); } + public String format(DateTimeFormatter formatter) { + return dt.format(formatter); + } + + public int get(TemporalField field) { + return dt.get(field); + } + + public long getLong(TemporalField field) { + return dt.getLong(field); + } + + public Chronology getChronology() { + return dt.getChronology(); + } + + public int compareTo(JodaCompatibleZonedDateTime o) { + return dt.compareTo(o.dt); + } + + public ZoneOffset getOffset() { + return dt.getOffset(); + } + + public boolean isSupported(TemporalField field) { + return dt.isSupported(field); + } + + public long toEpochSecond() { + return dt.toEpochSecond(); + } + + public LocalTime toLocalTime() { 
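+        // like the other ChronoZonedDateTime-style methods added above, this simply
+        // delegates to the wrapped java.time.ZonedDateTime instance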
+        return dt.toLocalTime();
+    }
+
     public boolean isAfter(JodaCompatibleZonedDateTime o) {
         return dt.isAfter(o.getZonedDateTime());
     }
@@ -106,6 +146,8 @@ public boolean isEqual(JodaCompatibleZonedDateTime o) {
         return dt.isEqual(o.getZonedDateTime());
     }

+
+
     public int getDayOfMonth() {
         return dt.getDayOfMonth();
     }

From 016f98c1cc8c6e7f9b37a2c7a1f2e6c1e1285e4c Mon Sep 17 00:00:00 2001
From: Mark Vieira
Date: Thu, 25 Jul 2019 12:44:45 -0700
Subject: [PATCH 30/51] Fix issue with Gradle daemons hanging indefinitely on
 shutdown (#44867)

---
 .../TestClusterCleanupOnShutdown.java         | 44 +++++++++----------
 .../TestClustersCleanupExtension.java         |  3 +-
 2 files changed, 22 insertions(+), 25 deletions(-)

diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterCleanupOnShutdown.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterCleanupOnShutdown.java
index 0381cece108e2..79f131983882d 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterCleanupOnShutdown.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterCleanupOnShutdown.java
@@ -19,16 +19,12 @@ public class TestClusterCleanupOnShutdown implements Runnable {

     private Set<ElasticsearchCluster> clustersToWatch = new HashSet<>();

-    public void watch(Collection<ElasticsearchCluster> cluster) {
-        synchronized (clustersToWatch) {
-            clustersToWatch.addAll(clustersToWatch);
-        }
+    public synchronized void watch(Collection<ElasticsearchCluster> clusters) {
+        clustersToWatch.addAll(clusters);
     }

-    public void unWatch(Collection<ElasticsearchCluster> cluster) {
-        synchronized (clustersToWatch) {
-            clustersToWatch.removeAll(clustersToWatch);
-        }
+    public synchronized void unWatch(Collection<ElasticsearchCluster> clusters) {
+        clustersToWatch.removeAll(clusters);
     }

     @Override
@@ -38,21 +34,23 @@ public void run() {
                 Thread.sleep(Long.MAX_VALUE);
             }
         } catch (InterruptedException interrupted) {
-            synchronized (clustersToWatch) {
-                if (clustersToWatch.isEmpty()) {
-                    return;
-                }
-                logger.info("Cleanup thread was interrupted, shutting down all clusters");
-                Iterator<ElasticsearchCluster> iterator = clustersToWatch.iterator();
-                while (iterator.hasNext()) {
-                    ElasticsearchCluster cluster = iterator.next();
-                    iterator.remove();
-                    try {
-                        cluster.stop(false);
-                    } catch (Exception e) {
-                        logger.warn("Could not shut down {}", cluster, e);
-                    }
-                }
+            shutdownClusters();
+        }
+    }
+
+    public synchronized void shutdownClusters() {
+        if (clustersToWatch.isEmpty()) {
+            return;
+        }
+        logger.info("Cleanup thread was interrupted, shutting down all clusters");
+        Iterator<ElasticsearchCluster> iterator = clustersToWatch.iterator();
+        while (iterator.hasNext()) {
+            ElasticsearchCluster cluster = iterator.next();
+            iterator.remove();
+            try {
+                cluster.stop(false);
+            } catch (Exception e) {
+                logger.warn("Could not shut down {}", cluster, e);
             }
         }
     }
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersCleanupExtension.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersCleanupExtension.java
index 14bdfa952db0f..86d496358c1b7 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersCleanupExtension.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersCleanupExtension.java
@@ -33,7 +33,6 @@ public TestClustersCleanupExtension() {
         executorService.submit(cleanupThread);
     }

-
     public static void createExtension(Project project) {
         if (project.getRootProject().getExtensions().findByType(TestClustersCleanupExtension.class) != null) {
             return;
         }
@@ -43,7 +42,7 @@
"__testclusters_rate_limit", TestClustersCleanupExtension.class ); - Thread shutdownHook = new Thread(ext.cleanupThread::run); + Thread shutdownHook = new Thread(ext.cleanupThread::shutdownClusters); Runtime.getRuntime().addShutdownHook(shutdownHook); project.getGradle().buildFinished(buildResult -> { ext.executorService.shutdownNow(); From ae486e4911da7969d35f6f3d76f1130c0f17df04 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Thu, 25 Jul 2019 22:51:04 +0200 Subject: [PATCH 31/51] Asynchronously connect to remote clusters (#44825) Refactors RemoteClusterConnection so that it no longer blockingly connects to remote clusters. Relates to #40150 --- .../transport/RemoteClusterConnection.java | 393 ++++++++---------- .../RemoteClusterConnectionTests.java | 30 +- 2 files changed, 186 insertions(+), 237 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index 731aa81179cb8..4205dcbddaba5 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -25,11 +25,11 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.StepListener; import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.support.ContextPreservingActionListener; -import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -38,7 +38,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.CancellableThreads; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.internal.io.IOUtils; @@ -48,17 +47,14 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; import java.util.Iterator; import java.util.List; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.BlockingQueue; import java.util.concurrent.ExecutorService; import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Predicate; import java.util.function.Supplier; @@ -138,7 +134,7 @@ private static DiscoveryNode maybeAddProxyAddress(String proxyAddress, Discovery if (proxyAddress == null || proxyAddress.isEmpty()) { return node; } else { - // resovle proxy address lazy here + // resolve proxy address lazy here InetSocketAddress proxyInetAddress = RemoteClusterAware.parseSeedAddress(proxyAddress); return new DiscoveryNode(node.getName(), node.getId(), node.getEphemeralId(), node.getHostName(), node .getHostAddress(), new TransportAddress(proxyInetAddress), node.getAttributes(), node.getRoles(), 
node.getVersion()); @@ -175,7 +171,9 @@ boolean isSkipUnavailable() { public void onNodeDisconnected(DiscoveryNode node) { if (connectionManager.size() < maxNumRemoteConnections) { // try to reconnect and fill up the slot of the disconnected node - connectHandler.forceConnect(); + connectHandler.connect(ActionListener.wrap( + ignore -> logger.trace("successfully connected after disconnect of {}", node), + e -> logger.trace(() -> new ParameterizedMessage("failed to connect after disconnect of {}", node), e))); } } @@ -357,201 +355,178 @@ public List>> getSeedNodes() { * we will just reject the connect trigger which will lead to failing searches. */ private class ConnectHandler implements Closeable { - private final Semaphore running = new Semaphore(1); + private static final int MAX_LISTENERS = 100; private final AtomicBoolean closed = new AtomicBoolean(false); - private final BlockingQueue> queue = new ArrayBlockingQueue<>(100); - private final CancellableThreads cancellableThreads = new CancellableThreads(); - - /** - * Triggers a connect round iff there are pending requests queued up and if there is no - * connect round currently running. - */ - void maybeConnect() { - connect(null); - } + private final Object mutex = new Object(); + private List> listeners = new ArrayList<>(); /** * Triggers a connect round unless there is one running already. If there is a connect round running, the listener will either * be queued or rejected and failed. */ void connect(ActionListener connectListener) { - connect(connectListener, false); - } - - /** - * Triggers a connect round unless there is one already running. In contrast to {@link #maybeConnect()} will this method also - * trigger a connect round if there is no listener queued up. - */ - void forceConnect() { - connect(null, true); - } - - private void connect(ActionListener connectListener, boolean forceRun) { - final boolean runConnect; - final Collection> toNotify; - final ActionListener listener = connectListener == null ? 
null : + boolean runConnect = false; + final ActionListener listener = ContextPreservingActionListener.wrapPreservingContext(connectListener, threadPool.getThreadContext()); - synchronized (queue) { - if (listener != null && queue.offer(listener) == false) { - listener.onFailure(new RejectedExecutionException("connect queue is full")); - return; - } - if (forceRun == false && queue.isEmpty()) { - return; - } - runConnect = running.tryAcquire(); - if (runConnect) { - toNotify = new ArrayList<>(); - queue.drainTo(toNotify); - if (closed.get()) { - running.release(); - ActionListener.onFailure(toNotify, new AlreadyClosedException("connect handler is already closed")); + synchronized (mutex) { + if (closed.get()) { + assert listeners.isEmpty(); + } else { + if (listeners.size() >= MAX_LISTENERS) { + assert listeners.size() == MAX_LISTENERS; + listener.onFailure(new RejectedExecutionException("connect queue is full")); return; + } else { + listeners.add(listener); } - } else { - toNotify = Collections.emptyList(); + runConnect = listeners.size() == 1; } } - if (runConnect) { - forkConnect(toNotify); + if (closed.get()) { + connectListener.onFailure(new AlreadyClosedException("connect handler is already closed")); + return; } - } - - private void forkConnect(final Collection> toNotify) { - ExecutorService executor = threadPool.executor(ThreadPool.Names.MANAGEMENT); - executor.submit(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - synchronized (queue) { - running.release(); - } - try { - ActionListener.onFailure(toNotify, e); - } finally { - maybeConnect(); + if (runConnect) { + ExecutorService executor = threadPool.executor(ThreadPool.Names.MANAGEMENT); + executor.submit(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + ActionListener.onFailure(getAndClearListeners(), e); } - } - @Override - protected void doRun() { - ActionListener listener = ActionListener.wrap((x) -> { - synchronized (queue) { - running.release(); - } - try { - ActionListener.onResponse(toNotify, x); - } finally { - maybeConnect(); - } + @Override + protected void doRun() { + collectRemoteNodes(seedNodes.stream().map(Tuple::v2).iterator(), + new ActionListener<>() { + @Override + public void onResponse(Void aVoid) { + ActionListener.onResponse(getAndClearListeners(), aVoid); + } - }, (e) -> { - synchronized (queue) { - running.release(); - } - try { - ActionListener.onFailure(toNotify, e); - } finally { - maybeConnect(); - } - }); - collectRemoteNodes(seedNodes.stream().map(Tuple::v2).iterator(), transportService, connectionManager, listener); + @Override + public void onFailure(Exception e) { + ActionListener.onFailure(getAndClearListeners(), e); + } + }); + } + }); + } + } + + private List> getAndClearListeners() { + final List> result; + synchronized (mutex) { + if (listeners.isEmpty()) { + result = Collections.emptyList(); + } else { + result = listeners; + listeners = new ArrayList<>(); } - }); + } + return result; } - private void collectRemoteNodes(Iterator> seedNodes, final TransportService transportService, - final ConnectionManager manager, ActionListener listener) { + private void collectRemoteNodes(Iterator> seedNodes, ActionListener listener) { if (Thread.currentThread().isInterrupted()) { listener.onFailure(new InterruptedException("remote connect thread got interrupted")); } - try { - if (seedNodes.hasNext()) { - cancellableThreads.executeIO(() -> { - final DiscoveryNode seedNode = maybeAddProxyAddress(proxyAddress, seedNodes.next().get()); - 
logger.debug("[{}] opening connection to seed node: [{}] proxy address: [{}]", clusterAlias, seedNode, - proxyAddress); - final TransportService.HandshakeResponse handshakeResponse; - final ConnectionProfile profile = ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG); - final Transport.Connection connection = PlainActionFuture.get( - fut -> manager.openConnection(seedNode, profile, fut)); - boolean success = false; - try { - try { - ConnectionProfile connectionProfile = connectionManager.getConnectionProfile(); - handshakeResponse = PlainActionFuture.get(fut -> - transportService.handshake(connection, connectionProfile.getHandshakeTimeout().millis(), - getRemoteClusterNamePredicate(), fut)); - } catch (IllegalStateException ex) { - logger.warn(new ParameterizedMessage("failed to connect to seed node [{}]", connection.getNode()), ex); - throw ex; - } - - final DiscoveryNode handshakeNode = maybeAddProxyAddress(proxyAddress, handshakeResponse.getDiscoveryNode()); - if (nodePredicate.test(handshakeNode) && manager.size() < maxNumRemoteConnections) { - PlainActionFuture.get(fut -> manager.connectToNode(handshakeNode, null, - transportService.connectionValidator(handshakeNode), ActionListener.map(fut, x -> null))); - if (remoteClusterName.get() == null) { - assert handshakeResponse.getClusterName().value() != null; - remoteClusterName.set(handshakeResponse.getClusterName()); - } - } - ClusterStateRequest request = new ClusterStateRequest(); - request.clear(); - request.nodes(true); - // here we pass on the connection since we can only close it once the sendRequest returns otherwise - // due to the async nature (it will return before it's actually sent) this can cause the request to fail - // due to an already closed connection. - ThreadPool threadPool = transportService.getThreadPool(); - ThreadContext threadContext = threadPool.getThreadContext(); - TransportService.ContextRestoreResponseHandler responseHandler = new TransportService - .ContextRestoreResponseHandler<>(threadContext.newRestorableContext(false), - new SniffClusterStateResponseHandler(connection, listener, seedNodes, - cancellableThreads)); - try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - // we stash any context here since this is an internal execution and should not leak any - // existing context information. 
- threadContext.markAsSystemContext(); - transportService.sendRequest(connection, ClusterStateAction.NAME, request, TransportRequestOptions.EMPTY, - responseHandler); - } - success = true; - } finally { - if (success == false) { - connection.close(); - } + + if (seedNodes.hasNext()) { + final DiscoveryNode seedNode = maybeAddProxyAddress(proxyAddress, seedNodes.next().get()); + logger.debug("[{}] opening connection to seed node: [{}] proxy address: [{}]", clusterAlias, seedNode, + proxyAddress); + final ConnectionProfile profile = ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG); + + final StepListener openConnectionStep = new StepListener<>(); + connectionManager.openConnection(seedNode, profile, openConnectionStep); + + final Consumer onFailure = e -> { + if (e instanceof ConnectTransportException || + e instanceof IOException || + e instanceof IllegalStateException) { + // ISE if we fail the handshake with an version incompatible node + if (seedNodes.hasNext()) { + logger.debug(() -> new ParameterizedMessage( + "fetching nodes from external cluster [{}] failed moving to next node", clusterAlias), e); + collectRemoteNodes(seedNodes, listener); + return; } - }); - } else { - listener.onFailure(new IllegalStateException("no seed node left")); - } - } catch (CancellableThreads.ExecutionCancelledException ex) { - logger.warn(() -> new ParameterizedMessage("fetching nodes from external cluster [{}] failed", clusterAlias), ex); - listener.onFailure(ex); // we got canceled - fail the listener and step out - } catch (ConnectTransportException | IOException | IllegalStateException ex) { - // ISE if we fail the handshake with an version incompatible node - if (seedNodes.hasNext()) { - logger.debug(() -> new ParameterizedMessage("fetching nodes from external cluster [{}] failed moving to next node", - clusterAlias), ex); - collectRemoteNodes(seedNodes, transportService, manager, listener); - } else { - logger.warn(() -> new ParameterizedMessage("fetching nodes from external cluster [{}] failed", clusterAlias), ex); - listener.onFailure(ex); - } + } + logger.warn(() -> new ParameterizedMessage("fetching nodes from external cluster [{}] failed", clusterAlias), e); + listener.onFailure(e); + }; + + final StepListener handShakeStep = new StepListener<>(); + openConnectionStep.whenComplete(connection -> { + ConnectionProfile connectionProfile = connectionManager.getConnectionProfile(); + transportService.handshake(connection, connectionProfile.getHandshakeTimeout().millis(), + getRemoteClusterNamePredicate(), handShakeStep); + }, onFailure); + + final StepListener fullConnectionStep = new StepListener<>(); + handShakeStep.whenComplete(handshakeResponse -> { + final DiscoveryNode handshakeNode = maybeAddProxyAddress(proxyAddress, handshakeResponse.getDiscoveryNode()); + + if (nodePredicate.test(handshakeNode) && connectionManager.size() < maxNumRemoteConnections) { + connectionManager.connectToNode(handshakeNode, null, + transportService.connectionValidator(handshakeNode), fullConnectionStep); + } else { + fullConnectionStep.onResponse(null); + } + }, e -> { + final Transport.Connection connection = openConnectionStep.result(); + logger.warn(new ParameterizedMessage("failed to connect to seed node [{}]", connection.getNode()), e); + IOUtils.closeWhileHandlingException(connection); + onFailure.accept(e); + }); + + fullConnectionStep.whenComplete(aVoid -> { + if (remoteClusterName.get() == null) { + TransportService.HandshakeResponse handshakeResponse = handShakeStep.result(); 
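+                // the handshake step completed before this callback ran, so its result is available here;
+                // record the remote cluster name from the first successful handshake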
+ assert handshakeResponse.getClusterName().value() != null; + remoteClusterName.set(handshakeResponse.getClusterName()); + } + final Transport.Connection connection = openConnectionStep.result(); + + ClusterStateRequest request = new ClusterStateRequest(); + request.clear(); + request.nodes(true); + // here we pass on the connection since we can only close it once the sendRequest returns otherwise + // due to the async nature (it will return before it's actually sent) this can cause the request to fail + // due to an already closed connection. + ThreadPool threadPool = transportService.getThreadPool(); + ThreadContext threadContext = threadPool.getThreadContext(); + TransportService.ContextRestoreResponseHandler responseHandler = new TransportService + .ContextRestoreResponseHandler<>(threadContext.newRestorableContext(false), + new SniffClusterStateResponseHandler(connection, listener, seedNodes)); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + // we stash any context here since this is an internal execution and should not leak any + // existing context information. + threadContext.markAsSystemContext(); + transportService.sendRequest(connection, ClusterStateAction.NAME, request, TransportRequestOptions.EMPTY, + responseHandler); + } + }, e -> { + IOUtils.closeWhileHandlingException(openConnectionStep.result()); + onFailure.accept(e); + }); + } else { + listener.onFailure(new IllegalStateException("no seed node left")); } } @Override public void close() throws IOException { - try { + final List> toNotify; + synchronized (mutex) { if (closed.compareAndSet(false, true)) { - cancellableThreads.cancel("connect handler is closed"); - running.acquire(); // acquire the semaphore to ensure all connections are closed and all thread joined - running.release(); - maybeConnect(); // now go and notify pending listeners + toNotify = listeners; + listeners = Collections.emptyList(); + } else { + toNotify = Collections.emptyList(); } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); } + ActionListener.onFailure(toNotify, new AlreadyClosedException("connect handler is already closed")); } final boolean isClosed() { @@ -564,15 +539,12 @@ private class SniffClusterStateResponseHandler implements TransportResponseHandl private final Transport.Connection connection; private final ActionListener listener; private final Iterator> seedNodes; - private final CancellableThreads cancellableThreads; SniffClusterStateResponseHandler(Transport.Connection connection, ActionListener listener, - Iterator> seedNodes, - CancellableThreads cancellableThreads) { + Iterator> seedNodes) { this.connection = connection; this.listener = listener; this.seedNodes = seedNodes; - this.cancellableThreads = cancellableThreads; } @Override @@ -582,43 +554,44 @@ public ClusterStateResponse read(StreamInput in) throws IOException { @Override public void handleResponse(ClusterStateResponse response) { - try { - if (remoteClusterName.get() == null) { - assert response.getClusterName().value() != null; - remoteClusterName.set(response.getClusterName()); - } - try (Closeable theConnection = connection) { // the connection is unused - see comment in #collectRemoteNodes - // we have to close this connection before we notify listeners - this is mainly needed for test correctness - // since if we do it afterwards we might fail assertions that check if all high level connections are closed. - // from a code correctness perspective we could also close it afterwards. 
This try/with block will - // maintain the possibly exceptions thrown from within the try block and suppress the ones that are possible thrown - // by closing the connection - cancellableThreads.executeIO(() -> { - DiscoveryNodes nodes = response.getState().nodes(); - Iterable nodesIter = nodes.getNodes()::valuesIt; - for (DiscoveryNode n : nodesIter) { - DiscoveryNode node = maybeAddProxyAddress(proxyAddress, n); - if (nodePredicate.test(node) && connectionManager.size() < maxNumRemoteConnections) { - try { - // noop if node is connected - PlainActionFuture.get(fut -> connectionManager.connectToNode(node, null, - transportService.connectionValidator(node), ActionListener.map(fut, x -> null))); - } catch (ConnectTransportException | IllegalStateException ex) { + handleNodes(response.getState().nodes().getNodes().valuesIt()); + } + + private void handleNodes(Iterator nodesIter) { + while (nodesIter.hasNext()) { + final DiscoveryNode node = maybeAddProxyAddress(proxyAddress, nodesIter.next()); + if (nodePredicate.test(node) && connectionManager.size() < maxNumRemoteConnections) { + connectionManager.connectToNode(node, null, + transportService.connectionValidator(node), new ActionListener<>() { + @Override + public void onResponse(Void aVoid) { + handleNodes(nodesIter); + } + + @Override + public void onFailure(Exception e) { + if (e instanceof ConnectTransportException || + e instanceof IllegalStateException) { // ISE if we fail the handshake with an version incompatible node // fair enough we can't connect just move on - logger.debug(() -> new ParameterizedMessage("failed to connect to node {}", node), ex); + logger.debug(() -> new ParameterizedMessage("failed to connect to node {}", node), e); + handleNodes(nodesIter); + } else { + logger.warn(() -> + new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), e); + IOUtils.closeWhileHandlingException(connection); + collectRemoteNodes(seedNodes, listener); } } - } - }); + }); + return; } - listener.onResponse(null); - } catch (CancellableThreads.ExecutionCancelledException ex) { - listener.onFailure(ex); // we got canceled - fail the listener and step out - } catch (Exception ex) { - logger.warn(() -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), ex); - collectRemoteNodes(seedNodes, transportService, connectionManager, listener); } + // We have to close this connection before we notify listeners - this is mainly needed for test correctness + // since if we do it afterwards we might fail assertions that check if all high level connections are closed. + // from a code correctness perspective we could also close it afterwards. 
+ IOUtils.closeWhileHandlingException(connection); + listener.onResponse(null); } @Override @@ -628,7 +601,7 @@ public void handleException(TransportException exp) { IOUtils.closeWhileHandlingException(connection); } finally { // once the connection is closed lets try the next node - collectRemoteNodes(seedNodes, transportService, connectionManager, listener); + collectRemoteNodes(seedNodes, listener); } } @@ -640,7 +613,9 @@ public String executor() { } boolean assertNoRunningConnections() { // for testing only - assert connectHandler.running.availablePermits() == 1; + synchronized (connectHandler.mutex) { + assert connectHandler.listeners.isEmpty(); + } return true; } diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index 305f8ddc79de3..804f7242c644c 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -47,7 +47,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.CancellableThreads; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.core.internal.io.IOUtils; @@ -431,20 +430,6 @@ private void updateSeedNodes( ActionListener listener = ActionListener.wrap( x -> latch.countDown(), x -> { - /* - * This can occur on a thread submitted to the thread pool while we are closing the - * remote cluster connection at the end of the test. - */ - if (x instanceof CancellableThreads.ExecutionCancelledException) { - try { - // we should already be shutting down - assertEquals(0L, latch.getCount()); - } finally { - // ensure we count down the latch on failure as well to not prevent failing tests from ending - latch.countDown(); - } - return; - } exceptionAtomicReference.set(x); latch.countDown(); } @@ -579,7 +564,7 @@ public void run() { closeRemote.countDown(); listenerCalled.await(); assertNotNull(exceptionReference.get()); - expectThrows(CancellableThreads.ExecutionCancelledException.class, () -> { + expectThrows(AlreadyClosedException.class, () -> { throw exceptionReference.get(); }); @@ -639,16 +624,6 @@ public void run() { latch.countDown(); }, x -> { - /* - * This can occur on a thread submitted to the thread pool while we are closing the - * remote cluster connection at the end of the test. 
- */ - if (x instanceof CancellableThreads.ExecutionCancelledException) { - // we should already be shutting down - assertTrue(executed.get()); - return; - } - assertTrue(executed.compareAndSet(false, true)); latch.countDown(); @@ -736,8 +711,7 @@ public void run() { throw assertionError; } } - if (x instanceof RejectedExecutionException || x instanceof AlreadyClosedException - || x instanceof CancellableThreads.ExecutionCancelledException) { + if (x instanceof RejectedExecutionException || x instanceof AlreadyClosedException) { // that's fine } else { throw new AssertionError(x); From ad64ec197bd181d51badd8107fc047c55873be8d Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Thu, 25 Jul 2019 22:43:41 +0200 Subject: [PATCH 32/51] Adapt BWC condition for clone index API after backport (#44267) Changes the BWC conditions for the clone index API after backport of the feature to 7.x --- .../rest-api-spec/test/indices.clone/10_basic.yml | 8 ++++---- .../test/indices.clone/20_source_mapping.yml | 4 ++-- .../rest-api-spec/test/indices.clone/30_copy_settings.yml | 4 ++-- .../action/admin/indices/shrink/ResizeRequest.java | 4 ++-- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml index 2d87acf63154c..412d29905ffc2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml @@ -29,8 +29,8 @@ setup: --- "Clone index via API": - skip: - version: " - 7.99.99" - reason: index cloning was added in 8.0.0 + version: " - 7.3.99" + reason: index cloning was added in 7.4.0 # make it read-only - do: indices.put_settings: @@ -95,8 +95,8 @@ setup: --- "Create illegal clone indices": - skip: - version: " - 7.99.99" - reason: index cloning was added in 8.0.0 + version: " - 7.3.99" + reason: index cloning was added in 7.4.0 # try to do an illegal clone with illegal number_of_shards - do: catch: /illegal_argument_exception/ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/20_source_mapping.yml index a8a4a71de86b1..625f574fa73de 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/20_source_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/20_source_mapping.yml @@ -1,8 +1,8 @@ --- "Clone index ignores target template mapping": - skip: - version: " - 7.99.99" - reason: index cloning was added in 8.0.0 + version: " - 7.3.99" + reason: index cloning was added in 7.4.0 # create index - do: indices.create: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/30_copy_settings.yml index 426e67385b331..503cc15609072 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/30_copy_settings.yml @@ -1,8 +1,8 @@ --- "Copy settings during clone index": - skip: - version: " - 7.99.99" - reason: index cloning was added in 8.0.0 + version: " - 7.3.99" + reason: index cloning was added in 7.4.0 features: [arbitrary_key] - do: diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java 
b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java
index 1732d1c1df6e0..465e1d2e11d70 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java
@@ -101,8 +101,8 @@ public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         targetIndexRequest.writeTo(out);
         out.writeString(sourceIndex);
-        if (type == ResizeType.CLONE && out.getVersion().before(Version.V_8_0_0)) {
-            throw new IllegalArgumentException("can't send clone request to a node that's older than " + Version.V_8_0_0);
+        if (type == ResizeType.CLONE && out.getVersion().before(Version.V_7_4_0)) {
+            throw new IllegalArgumentException("can't send clone request to a node that's older than " + Version.V_7_4_0);
         }
         out.writeEnum(type);
         out.writeOptionalBoolean(copySettings);

From 5f0861aac00f13b66fcbe96e6715a41dd3a39612 Mon Sep 17 00:00:00 2001
From: Gordon Brown
Date: Thu, 25 Jul 2019 17:16:18 -0400
Subject: [PATCH 33/51] Add option to filter ILM explain response (#44777)

In order to make it easier to interpret the output of the ILM Explain API,
this commit adds two request parameters to that API:

- `only_managed`, which causes the response to only contain indices
  which have `index.lifecycle.name` set

- `only_errors`, which causes the response to contain only indices in an
  ILM error state

"Error state" is defined as either being in the `ERROR` step or having
`index.lifecycle.name` set to a policy that does not exist.

---
 docs/reference/ilm/apis/explain.asciidoc      |  9 ++++
 .../ExplainLifecycleRequest.java              | 46 ++++++++++++++--
 .../ExplainLifecycleRequestTests.java         | 34 ++++++++----
 .../ilm/TimeSeriesLifecycleActionsIT.java     | 53 +++++++++++++++++--
 .../test/ilm/40_explain_lifecycle.yml         | 53 +++++++++++++++++++
 .../xpack/ilm/IndexLifecycleService.java      |  4 ++
 .../action/RestExplainLifecycleAction.java    |  4 +-
 .../TransportExplainLifecycleAction.java      | 47 ++++++++++------
 .../api/ilm.explain_lifecycle.json            | 11 +++-
 9 files changed, 228 insertions(+), 33 deletions(-)

diff --git a/docs/reference/ilm/apis/explain.asciidoc b/docs/reference/ilm/apis/explain.asciidoc
index 85855d18beae6..3aa2572175eda 100644
--- a/docs/reference/ilm/apis/explain.asciidoc
+++ b/docs/reference/ilm/apis/explain.asciidoc
@@ -26,6 +26,15 @@ about any failures.

 ==== Request Parameters

+`only_managed`::
+  (boolean) Filters the returned indices to only indices that are managed by
+  ILM.
+
+`only_errors`::
+  (boolean) Filters the returned indices to only indices that are managed by
+  ILM and are in an error state, either due to encountering an error while
+  executing the policy, or attempting to use a policy that does not exist.
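+
+For example, a request like the following (the index pattern here is
+illustrative) returns information only for indices that are managed by ILM
+and are currently in an error state:
+
+[source,js]
+--------------------------------------------------
+GET my-index-*/_ilm/explain?only_errors=true
+--------------------------------------------------
+// NOTCONSOLE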
+ include::{docdir}/rest-api/timeoutparms.asciidoc[] ==== Authorization diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleRequest.java index 037de2d505292..01e70dcce0920 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleRequest.java @@ -6,9 +6,11 @@ package org.elasticsearch.xpack.core.indexlifecycle; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.info.ClusterInfoRequest; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; import java.util.Arrays; @@ -21,6 +23,10 @@ * {@link #indices(String...)} method */ public class ExplainLifecycleRequest extends ClusterInfoRequest { + private static final Version FILTERS_INTRODUCED_VERSION = Version.V_8_0_0; + + private boolean onlyErrors = false; + private boolean onlyManaged = false; public ExplainLifecycleRequest() { super(); @@ -28,6 +34,37 @@ public ExplainLifecycleRequest() { public ExplainLifecycleRequest(StreamInput in) throws IOException { super(in); + if (in.getVersion().onOrAfter(FILTERS_INTRODUCED_VERSION)) { + onlyErrors = in.readBoolean(); + onlyManaged = in.readBoolean(); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + if (out.getVersion().onOrAfter(FILTERS_INTRODUCED_VERSION)) { + out.writeBoolean(onlyErrors); + out.writeBoolean(onlyManaged); + } + } + + public boolean onlyErrors() { + return onlyErrors; + } + + public ExplainLifecycleRequest onlyErrors(boolean onlyErrors) { + this.onlyErrors = onlyErrors; + return this; + } + + public boolean onlyManaged() { + return onlyManaged; + } + + public ExplainLifecycleRequest onlyManaged(boolean onlyManaged) { + this.onlyManaged = onlyManaged; + return this; } @Override @@ -37,7 +74,7 @@ public ActionRequestValidationException validate() { @Override public int hashCode() { - return Objects.hash(Arrays.hashCode(indices()), indicesOptions()); + return Objects.hash(Arrays.hashCode(indices()), indicesOptions(), onlyErrors, onlyManaged); } @Override @@ -50,12 +87,15 @@ public boolean equals(Object obj) { } ExplainLifecycleRequest other = (ExplainLifecycleRequest) obj; return Objects.deepEquals(indices(), other.indices()) && - Objects.equals(indicesOptions(), other.indicesOptions()); + Objects.equals(indicesOptions(), other.indicesOptions()) && + Objects.equals(onlyErrors(), other.onlyErrors()) && + Objects.equals(onlyManaged(), other.onlyManaged()); } @Override public String toString() { - return "ExplainLifecycleRequest [indices()=" + Arrays.toString(indices()) + ", indicesOptions()=" + indicesOptions() + "]"; + return "ExplainLifecycleRequest [indices()=" + Arrays.toString(indices()) + ", indicesOptions()=" + indicesOptions() + + ", onlyErrors()=" + onlyErrors() + ", onlyManaged()=" + onlyManaged() + "]"; } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleRequestTests.java index 4c1ffac49a244..e8a96376d739c 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleRequestTests.java @@ -26,6 +26,12 @@ protected ExplainLifecycleRequest createTestInstance() { randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); request.indicesOptions(indicesOptions); } + if (randomBoolean()) { + request.onlyErrors(randomBoolean()); + } + if (randomBoolean()) { + request.onlyManaged(randomBoolean()); + } return request; } @@ -33,21 +39,31 @@ protected ExplainLifecycleRequest createTestInstance() { protected ExplainLifecycleRequest mutateInstance(ExplainLifecycleRequest instance) throws IOException { String[] indices = instance.indices(); IndicesOptions indicesOptions = instance.indicesOptions(); - switch (between(0, 1)) { - case 0: - indices = randomValueOtherThanMany(i -> Arrays.equals(i, instance.indices()), + boolean onlyErrors = instance.onlyErrors(); + boolean onlyManaged = instance.onlyManaged(); + switch (between(0, 3)) { + case 0: + indices = randomValueOtherThanMany(i -> Arrays.equals(i, instance.indices()), () -> generateRandomStringArray(20, 10, false, false)); - break; - case 1: - indicesOptions = randomValueOtherThan(indicesOptions, () -> IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), + break; + case 1: + indicesOptions = randomValueOtherThan(indicesOptions, () -> IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); - break; - default: - throw new AssertionError("Illegal randomisation branch"); + break; + case 2: + onlyErrors = !onlyErrors; + break; + case 3: + onlyManaged = !onlyManaged; + break; + default: + throw new AssertionError("Illegal randomisation branch"); } ExplainLifecycleRequest newRequest = new ExplainLifecycleRequest(); newRequest.indices(indices); newRequest.indicesOptions(indicesOptions); + newRequest.onlyErrors(onlyErrors); + newRequest.onlyManaged(onlyManaged); return newRequest; } diff --git a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java index 21ee948299d1d..38dcbfb0ba7e9 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java @@ -54,9 +54,11 @@ import static java.util.Collections.singletonMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; @@ -829,6 +831,45 @@ public void testCanStopILMWithPolicyUsingNonexistentPolicy() throws Exception { assertOK(client().performRequest(startILMReqest)); } + public void testExplainFilters() throws Exception { + String goodIndex = index + "-good-000001"; + String errorIndex = index + "-error"; + String nonexistantPolicyIndex = index + "-nonexistant-policy"; + String unmanagedIndex = index + "-unmanaged"; + + createFullPolicy(TimeValue.ZERO); + + 
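// create one index in each state of interest: a healthy managed index, one that
+        // will fail its policy (it has no rollover alias), one that references a
+        // policy that does not exist, and one index that is not managed by ILM
+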
createIndexWithSettings(goodIndex, Settings.builder() + .put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, "alias") + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(LifecycleSettings.LIFECYCLE_NAME, policy)); + createIndexWithSettingsNoAlias(errorIndex, Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(LifecycleSettings.LIFECYCLE_NAME, policy)); + createIndexWithSettingsNoAlias(nonexistantPolicyIndex, Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(LifecycleSettings.LIFECYCLE_NAME, randomValueOtherThan(policy, () -> randomAlphaOfLengthBetween(3,10)))); + createIndexWithSettingsNoAlias(unmanagedIndex, Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); + + assertBusy(() -> { + Map<String, Map<String, Object>> explainResponse = explain(index + "*", false, false); + assertNotNull(explainResponse); + assertThat(explainResponse, + allOf(hasKey(goodIndex), hasKey(errorIndex), hasKey(nonexistantPolicyIndex), hasKey(unmanagedIndex))); + + Map<String, Map<String, Object>> onlyManagedResponse = explain(index + "*", false, true); + assertNotNull(onlyManagedResponse); + assertThat(onlyManagedResponse, allOf(hasKey(goodIndex), hasKey(errorIndex), hasKey(nonexistantPolicyIndex))); + assertThat(onlyManagedResponse, not(hasKey(unmanagedIndex))); + + Map<String, Map<String, Object>> onlyErrorsResponse = explain(index + "*", true, randomBoolean()); + assertNotNull(onlyErrorsResponse); + assertThat(onlyErrorsResponse, allOf(hasKey(errorIndex), hasKey(nonexistantPolicyIndex))); + assertThat(onlyErrorsResponse, allOf(not(hasKey(goodIndex)), not(hasKey(unmanagedIndex)))); + }); + } + private void createFullPolicy(TimeValue hotTime) throws IOException { Map<String, LifecycleAction> hotActions = new HashMap<>(); hotActions.put(SetPriorityAction.NAME, new SetPriorityAction(100)); @@ -948,15 +989,21 @@ private String getReasonForIndex(String indexName) throws IOException { } private Map<String, Object> explainIndex(String indexName) throws IOException { - Request explainRequest = new Request("GET", indexName + "/_ilm/explain"); + return explain(indexName, false, false).get(indexName); + } + + private Map<String, Map<String, Object>> explain(String indexPattern, boolean onlyErrors, boolean onlyManaged) throws IOException { + Request explainRequest = new Request("GET", indexPattern + "/_ilm/explain"); + explainRequest.addParameter("only_errors", Boolean.toString(onlyErrors)); + explainRequest.addParameter("only_managed", Boolean.toString(onlyManaged)); Response response = client().performRequest(explainRequest); Map<String, Object> responseMap; try (InputStream is = response.getEntity().getContent()) { responseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); } - @SuppressWarnings("unchecked") Map<String, Object> indexResponse = ((Map<String, Map<String, Object>>) responseMap.get("indices")) - .get(indexName); + @SuppressWarnings("unchecked") Map<String, Map<String, Object>> indexResponse = + ((Map<String, Map<String, Object>>) responseMap.get("indices")); return indexResponse; } diff --git a/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/40_explain_lifecycle.yml b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/40_explain_lifecycle.yml index baa051103d5fa..81f7064f8ce9a 100644 --- a/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/40_explain_lifecycle.yml +++ b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/40_explain_lifecycle.yml @@ -60,6 +60,12 @@ setup: - do: indices.create: index: my_index_no_policy + - do: + indices.create: + index: index_with_policy_that_doesnt_exist + body: + settings: + index.lifecycle.name: "a_policy_that_doesnt_exist" --- teardown: @@ -81,6 +87,10 @@ teardown:
indices.delete: index: my_index_no_policy + - do: + indices.delete: + index: index_with_policy_that_doesnt_exist + - do: ilm.delete_lifecycle: policy: "my_moveable_timeseries_lifecycle" @@ -112,6 +122,7 @@ teardown: - is_false: indices.my_index2 - is_false: indices.another_index - is_false: indices.unmanaged_index + - is_false: indices.index_with_policy_that_doesnt_exist --- "Test Wildcard Index Lifecycle Explain": @@ -146,6 +157,7 @@ teardown: - is_false: indices.another_index - is_false: indices.unmanaged_index + - is_false: indices.index_with_policy_that_doesnt_exist --- @@ -201,6 +213,16 @@ teardown: - is_false: indices.another_index.failed_step - is_false: indices.another_index.step_info + - match: { indices.index_with_policy_that_doesnt_exist.index: "index_with_policy_that_doesnt_exist" } + - match: { indices.index_with_policy_that_doesnt_exist.policy: "a_policy_that_doesnt_exist" } + - match: { indices.index_with_policy_that_doesnt_exist.step_info.reason: "policy [a_policy_that_doesnt_exist] does not exist" } + - is_true: indices.index_with_policy_that_doesnt_exist.managed + - is_false: indices.index_with_policy_that_doesnt_exist.phase + - is_false: indices.index_with_policy_that_doesnt_exist.action + - is_false: indices.index_with_policy_that_doesnt_exist.step + - is_false: indices.index_with_policy_that_doesnt_exist.age + - is_false: indices.index_with_policy_that_doesnt_exist.failed_step + --- "Test Unmanaged Index Lifecycle Explain": @@ -221,3 +243,34 @@ teardown: - is_false: indices.my_index - is_false: indices.my_index2 - is_false: indices.another_index + - is_false: indices.index_with_policy_that_doesnt_exist + +--- +"Test filter for only managed indices": + + - do: + ilm.explain_lifecycle: + index: "*" + only_managed: true + + - match: { indices.my_index.index: "my_index" } + - match: { indices.my_index2.index: "my_index2" } + - match: { indices.another_index.index: "another_index" } + - match: { indices.index_with_policy_that_doesnt_exist.index: "index_with_policy_that_doesnt_exist" } + - is_false: indices.unmanaged_index + - is_false: indices.my_index_no_policy + +--- +"Test filter for only error indices": + + - do: + ilm.explain_lifecycle: + index: "*" + only_errors: true + + - match: { indices.index_with_policy_that_doesnt_exist.index: "index_with_policy_that_doesnt_exist" } + - is_false: indices.unmanaged_index + - is_false: indices.my_index_no_policy + - is_false: indices.my_index + - is_false: indices.my_index2 + - is_false: indices.another_index diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java index 97b4bf5504e52..c38419fa9923a 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java @@ -214,6 +214,10 @@ public void triggered(SchedulerEngine.Event event) { } } + public boolean policyExists(String policyId) { + return policyRegistry.policyExists(policyId); + } + /** * executes the policy execution on the appropriate indices by running cluster-state tasks per index. 
* diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestExplainLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestExplainLifecycleAction.java index 2d6a451b6889a..85807a77dfb07 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestExplainLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestExplainLifecycleAction.java @@ -10,11 +10,11 @@ import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleRequest; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleRequest; import org.elasticsearch.xpack.core.indexlifecycle.action.ExplainLifecycleAction; import java.io.IOException; @@ -37,6 +37,8 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient ExplainLifecycleRequest explainLifecycleRequest = new ExplainLifecycleRequest(); explainLifecycleRequest.indices(indexes); explainLifecycleRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, IndicesOptions.strictExpandOpen())); + explainLifecycleRequest.onlyManaged(restRequest.paramAsBoolean("only_managed", false)); + explainLifecycleRequest.onlyErrors(restRequest.paramAsBoolean("only_errors", false)); String masterNodeTimeout = restRequest.param("master_timeout"); if (masterNodeTimeout != null) { explainLifecycleRequest.masterNodeTimeout(masterNodeTimeout); diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleAction.java index e5e2e051acff3..460764768dfbe 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleAction.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.indexlifecycle.ErrorStep; import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleRequest; import org.elasticsearch.xpack.core.indexlifecycle.ExplainLifecycleResponse; import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleExplainResponse; @@ -34,6 +35,7 @@ import org.elasticsearch.xpack.core.indexlifecycle.LifecycleSettings; import org.elasticsearch.xpack.core.indexlifecycle.PhaseExecutionInfo; import org.elasticsearch.xpack.core.indexlifecycle.action.ExplainLifecycleAction; +import org.elasticsearch.xpack.ilm.IndexLifecycleService; import java.io.IOException; import java.util.HashMap; @@ -43,14 +45,16 @@ public class TransportExplainLifecycleAction extends TransportClusterInfoAction { private final NamedXContentRegistry xContentRegistry; + private final IndexLifecycleService indexLifecycleService; @Inject public TransportExplainLifecycleAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - NamedXContentRegistry 
xContentRegistry) { + NamedXContentRegistry xContentRegistry, IndexLifecycleService indexLifecycleService) { super(ExplainLifecycleAction.NAME, transportService, clusterService, threadPool, actionFilters, ExplainLifecycleRequest::new, indexNameExpressionResolver); this.xContentRegistry = xContentRegistry; + this.indexLifecycleService = indexLifecycleService; } @Override @@ -73,7 +77,7 @@ protected ClusterBlockException checkBlock(ExplainLifecycleRequest request, Clus @Override protected void doMasterOperation(ExplainLifecycleRequest request, String[] concreteIndices, ClusterState state, ActionListener<ExplainLifecycleResponse> listener) { - Map<String, IndexLifecycleExplainResponse> indexReponses = new HashMap<>(); + Map<String, IndexLifecycleExplainResponse> indexResponses = new HashMap<>(); for (String index : concreteIndices) { IndexMetaData idxMetadata = state.metaData().index(index); Settings idxSettings = idxMetadata.getSettings(); @@ -100,23 +104,34 @@ protected void doMasterOperation(ExplainLifecycleRequest request, String[] concr } final IndexLifecycleExplainResponse indexResponse; if (Strings.hasLength(policyName)) { - indexResponse = IndexLifecycleExplainResponse.newManagedIndexResponse(index, policyName, - lifecycleState.getLifecycleDate(), - lifecycleState.getPhase(), - lifecycleState.getAction(), - lifecycleState.getStep(), - lifecycleState.getFailedStep(), - lifecycleState.getPhaseTime(), - lifecycleState.getActionTime(), - lifecycleState.getStepTime(), - stepInfoBytes, - phaseExecutionInfo); - } else { + // If this is requesting only errors, only include indices in the error step or which are using a nonexistent policy + if (request.onlyErrors() == false + || (ErrorStep.NAME.equals(lifecycleState.getStep()) || indexLifecycleService.policyExists(policyName) == false)) { + indexResponse = IndexLifecycleExplainResponse.newManagedIndexResponse(index, policyName, + lifecycleState.getLifecycleDate(), + lifecycleState.getPhase(), + lifecycleState.getAction(), + lifecycleState.getStep(), + lifecycleState.getFailedStep(), + lifecycleState.getPhaseTime(), + lifecycleState.getActionTime(), + lifecycleState.getStepTime(), + stepInfoBytes, + phaseExecutionInfo); + } else { + indexResponse = null; + } + } else if (request.onlyManaged() == false && request.onlyErrors() == false) { indexResponse = IndexLifecycleExplainResponse.newUnmanagedIndexResponse(index); + } else { + indexResponse = null; + } + + if (indexResponse != null) { + indexResponses.put(indexResponse.getIndex(), indexResponse); } - indexReponses.put(indexResponse.getIndex(), indexResponse); } - listener.onResponse(new ExplainLifecycleResponse(indexReponses)); + listener.onResponse(new ExplainLifecycleResponse(indexResponses)); } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.explain_lifecycle.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.explain_lifecycle.json index 8c48b22a5eba5..9b80525946fca 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.explain_lifecycle.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.explain_lifecycle.json @@ -11,7 +11,16 @@ "description" : "The name of the index to explain" } }, - "params": {} + "params": { + "only_managed": { + "type": "boolean", + "description": "filters the indices included in the response to ones managed by ILM" + }, + "only_errors": { + "type": "boolean", + "description": "filters the indices included in the response to ones in an ILM error state, implies only_managed" + } + } }, "body": null } From f2a6dd7250ce2d037a3bf9cac57046a30458631a Mon Sep 17 00:00:00 2001 From: lcawl Date: Thu, 25 Jul 2019
15:03:57 -0700 Subject: [PATCH 34/51] [DOCS] Clarifies dataframe transform validations --- .../data-frames/apis/put-transform.asciidoc | 13 ++++++++----- .../data-frames/apis/start-transform.asciidoc | 8 +++++--- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/docs/reference/data-frames/apis/put-transform.asciidoc b/docs/reference/data-frames/apis/put-transform.asciidoc index 6e2d1a33391ad..0aec3a8e372cb 100644 --- a/docs/reference/data-frames/apis/put-transform.asciidoc +++ b/docs/reference/data-frames/apis/put-transform.asciidoc @@ -46,6 +46,13 @@ source indices and a check that the destination index is not part of the source index pattern. You can use the `defer_validation` parameter to skip these checks. +Deferred validations are always run when the {dataframe-transform} is started, +with the exception of privilege checks. When {es} {security-features} are +enabled, the {dataframe-transform} remembers which roles the user that created +it had at the time of creation and uses those same roles. If those roles do not +have the required privileges on the source and destination indices, the +{dataframe-transform} fails when it attempts unauthorized operations. + IMPORTANT: You must use {kib} or this API to create a {dataframe-transform}. Do not put a {dataframe-transform} directly into any `.data-frame-internal*` indices using the Elasticsearch index API. @@ -66,11 +73,7 @@ IMPORTANT: You must use {kib} or this API to create a {dataframe-transform}. `defer_validation`:: (Optional, boolean) When `true`, deferrable validations are not run. This behavior may be desired if the source index does not exist until after the - {dataframe-transform} is created. Deferred validations are always run when the - {dataframe-transform} is started, with the exception of privilege checks. If - the user who created the transform does not have the required privileges on - the source and destination indices, the transform starts but then fails when - it attempts the unauthorized operation. The default value is `false`. + {dataframe-transform} is created. [[put-data-frame-transform-request-body]] ==== {api-request-body-title} diff --git a/docs/reference/data-frames/apis/start-transform.asciidoc b/docs/reference/data-frames/apis/start-transform.asciidoc index e82f037952698..2c5f0ca6517ad 100644 --- a/docs/reference/data-frames/apis/start-transform.asciidoc +++ b/docs/reference/data-frames/apis/start-transform.asciidoc @@ -44,9 +44,11 @@ index take precedence over dynamic mappings and templates. When the {dataframe-transform} starts, a series of validations occur to ensure its success. If you deferred validation when you created the {dataframe-transform}, they occur when you start the transform--with the -exception of privilege checks. If the user who created the transform does not -have the required privileges on the source and destination indices, the -transform starts but then fails when it attempts the unauthorized operation. +exception of privilege checks. When {es} {security-features} are enabled, the +{dataframe-transform} remembers which roles the user that created it had at the +time of creation and uses those same roles. If those roles do not have the +required privileges on the source and destination indices, the +{dataframe-transform} fails when it attempts unauthorized operations. 
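The flow described above can be sketched with the low-level REST client. This is an illustrative sketch only: the transform id `my-transform`, the `transformConfigJson` variable, and the surrounding test-style context providing `client()` are assumptions; the endpoints and the `defer_validation` parameter are the ones documented here.

[source,java]
------------------------------------------------------------
// Create the transform with deferrable validations skipped, e.g. because
// the source index will only be created later (illustrative names).
Request putTransform = new Request("PUT", "/_data_frame/transforms/my-transform");
putTransform.addParameter("defer_validation", "true");
putTransform.setJsonEntity(transformConfigJson); // hypothetical config variable
client().performRequest(putTransform);

// The deferred validations (all except the privilege checks) run at start
// time; missing privileges only surface when the transform attempts the
// unauthorized operation, using the roles the creating user had.
Request startTransform = new Request("POST", "/_data_frame/transforms/my-transform/_start");
client().performRequest(startTransform);
------------------------------------------------------------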
[[start-data-frame-transform-path-parms]] ==== {api-path-parms-title} From b8ef6127f20ed8b690fb6c154c1c40301d6dd925 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Fri, 26 Jul 2019 05:57:02 +0200 Subject: [PATCH 35/51] Upgrade to Lucene 8.2.0 release (#44859) --- buildSrc/version.properties | 2 +- .../lucene-expressions-8.2.0-snapshot-6413aae226.jar.sha1 | 1 - .../lang-expression/licenses/lucene-expressions-8.2.0.jar.sha1 | 1 + .../lucene-analyzers-icu-8.2.0-snapshot-6413aae226.jar.sha1 | 1 - .../analysis-icu/licenses/lucene-analyzers-icu-8.2.0.jar.sha1 | 1 + ...lucene-analyzers-kuromoji-8.2.0-snapshot-6413aae226.jar.sha1 | 1 - .../licenses/lucene-analyzers-kuromoji-8.2.0.jar.sha1 | 1 + .../lucene-analyzers-nori-8.2.0-snapshot-6413aae226.jar.sha1 | 1 - .../analysis-nori/licenses/lucene-analyzers-nori-8.2.0.jar.sha1 | 1 + ...lucene-analyzers-phonetic-8.2.0-snapshot-6413aae226.jar.sha1 | 1 - .../licenses/lucene-analyzers-phonetic-8.2.0.jar.sha1 | 1 + .../lucene-analyzers-smartcn-8.2.0-snapshot-6413aae226.jar.sha1 | 1 - .../licenses/lucene-analyzers-smartcn-8.2.0.jar.sha1 | 1 + .../lucene-analyzers-stempel-8.2.0-snapshot-6413aae226.jar.sha1 | 1 - .../licenses/lucene-analyzers-stempel-8.2.0.jar.sha1 | 1 + ...cene-analyzers-morfologik-8.2.0-snapshot-6413aae226.jar.sha1 | 1 - .../licenses/lucene-analyzers-morfologik-8.2.0.jar.sha1 | 1 + .../lucene-analyzers-common-8.2.0-snapshot-6413aae226.jar.sha1 | 1 - server/licenses/lucene-analyzers-common-8.2.0.jar.sha1 | 1 + .../lucene-backward-codecs-8.2.0-snapshot-6413aae226.jar.sha1 | 1 - server/licenses/lucene-backward-codecs-8.2.0.jar.sha1 | 1 + server/licenses/lucene-core-8.2.0-snapshot-6413aae226.jar.sha1 | 1 - server/licenses/lucene-core-8.2.0.jar.sha1 | 1 + .../licenses/lucene-grouping-8.2.0-snapshot-6413aae226.jar.sha1 | 1 - server/licenses/lucene-grouping-8.2.0.jar.sha1 | 1 + .../lucene-highlighter-8.2.0-snapshot-6413aae226.jar.sha1 | 1 - server/licenses/lucene-highlighter-8.2.0.jar.sha1 | 1 + server/licenses/lucene-join-8.2.0-snapshot-6413aae226.jar.sha1 | 1 - server/licenses/lucene-join-8.2.0.jar.sha1 | 1 + .../licenses/lucene-memory-8.2.0-snapshot-6413aae226.jar.sha1 | 1 - server/licenses/lucene-memory-8.2.0.jar.sha1 | 1 + server/licenses/lucene-misc-8.2.0-snapshot-6413aae226.jar.sha1 | 1 - server/licenses/lucene-misc-8.2.0.jar.sha1 | 1 + .../licenses/lucene-queries-8.2.0-snapshot-6413aae226.jar.sha1 | 1 - server/licenses/lucene-queries-8.2.0.jar.sha1 | 1 + .../lucene-queryparser-8.2.0-snapshot-6413aae226.jar.sha1 | 1 - server/licenses/lucene-queryparser-8.2.0.jar.sha1 | 1 + .../licenses/lucene-sandbox-8.2.0-snapshot-6413aae226.jar.sha1 | 1 - server/licenses/lucene-sandbox-8.2.0.jar.sha1 | 1 + .../licenses/lucene-spatial-8.2.0-snapshot-6413aae226.jar.sha1 | 1 - server/licenses/lucene-spatial-8.2.0.jar.sha1 | 1 + .../lucene-spatial-extras-8.2.0-snapshot-6413aae226.jar.sha1 | 1 - server/licenses/lucene-spatial-extras-8.2.0.jar.sha1 | 1 + .../lucene-spatial3d-8.2.0-snapshot-6413aae226.jar.sha1 | 1 - server/licenses/lucene-spatial3d-8.2.0.jar.sha1 | 1 + .../licenses/lucene-suggest-8.2.0-snapshot-6413aae226.jar.sha1 | 1 - server/licenses/lucene-suggest-8.2.0.jar.sha1 | 1 + .../licenses/lucene-core-8.2.0-snapshot-6413aae226.jar.sha1 | 1 - .../plugin/sql/sql-action/licenses/lucene-core-8.2.0.jar.sha1 | 1 + 49 files changed, 25 insertions(+), 25 deletions(-) delete mode 100644 modules/lang-expression/licenses/lucene-expressions-8.2.0-snapshot-6413aae226.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-8.2.0.jar.sha1 
delete mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-8.2.0-snapshot-6413aae226.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-8.2.0.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.2.0-snapshot-6413aae226.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.2.0.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analyzers-nori-8.2.0-snapshot-6413aae226.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analyzers-nori-8.2.0.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.2.0-snapshot-6413aae226.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.2.0.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.2.0-snapshot-6413aae226.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.2.0.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.2.0-snapshot-6413aae226.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.2.0.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.2.0-snapshot-6413aae226.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-analyzers-common-8.2.0-snapshot-6413aae226.jar.sha1 create mode 100644 server/licenses/lucene-analyzers-common-8.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-8.2.0-snapshot-6413aae226.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-8.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-core-8.2.0-snapshot-6413aae226.jar.sha1 create mode 100644 server/licenses/lucene-core-8.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-8.2.0-snapshot-6413aae226.jar.sha1 create mode 100644 server/licenses/lucene-grouping-8.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-8.2.0-snapshot-6413aae226.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-8.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-join-8.2.0-snapshot-6413aae226.jar.sha1 create mode 100644 server/licenses/lucene-join-8.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-memory-8.2.0-snapshot-6413aae226.jar.sha1 create mode 100644 server/licenses/lucene-memory-8.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-misc-8.2.0-snapshot-6413aae226.jar.sha1 create mode 100644 server/licenses/lucene-misc-8.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-queries-8.2.0-snapshot-6413aae226.jar.sha1 create mode 100644 server/licenses/lucene-queries-8.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-8.2.0-snapshot-6413aae226.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-8.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-8.2.0-snapshot-6413aae226.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-8.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-8.2.0-snapshot-6413aae226.jar.sha1 create mode 100644 server/licenses/lucene-spatial-8.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-8.2.0-snapshot-6413aae226.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-8.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-8.2.0-snapshot-6413aae226.jar.sha1 create mode 100644 
server/licenses/lucene-spatial3d-8.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-8.2.0-snapshot-6413aae226.jar.sha1 create mode 100644 server/licenses/lucene-suggest-8.2.0.jar.sha1 delete mode 100644 x-pack/plugin/sql/sql-action/licenses/lucene-core-8.2.0-snapshot-6413aae226.jar.sha1 create mode 100644 x-pack/plugin/sql/sql-action/licenses/lucene-core-8.2.0.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 5cff6641ac590..e3e2782448330 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 8.0.0 -lucene = 8.2.0-snapshot-6413aae226 +lucene = 8.2.0 bundled_jdk = 12.0.1+12@69cfe15208a647278a19ef0990eea691 diff --git a/modules/lang-expression/licenses/lucene-expressions-8.2.0-snapshot-6413aae226.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.2.0-snapshot-6413aae226.jar.sha1 deleted file mode 100644 index 22c90f85ee030..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-8.2.0-snapshot-6413aae226.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da9aba1dcaea004f04a37fd5c1b900a2347d4cdd \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-8.2.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.2.0.jar.sha1 new file mode 100644 index 0000000000000..6e4047678cf09 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-8.2.0.jar.sha1 @@ -0,0 +1 @@ +afec1e7228eca31b5f469bdcbbc84d04b0748eae \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.2.0-snapshot-6413aae226.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.2.0-snapshot-6413aae226.jar.sha1 deleted file mode 100644 index e072bc01faf90..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.2.0-snapshot-6413aae226.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da0465f77ffacb36672dcd6075319e02dbe76673 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.2.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.2.0.jar.sha1 new file mode 100644 index 0000000000000..d733c7da58975 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.2.0.jar.sha1 @@ -0,0 +1 @@ +246a593068e012d0deac604cde68734b3e843aa3 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.2.0-snapshot-6413aae226.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.2.0-snapshot-6413aae226.jar.sha1 deleted file mode 100644 index 4308cf1e5e651..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.2.0-snapshot-6413aae226.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cd4d0398da5d78187cb636f19b4b81d05e238cd4 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.2.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.2.0.jar.sha1 new file mode 100644 index 0000000000000..8be94f834f84b --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.2.0.jar.sha1 @@ -0,0 +1 @@ +169e079501f3e0b143c4ea3c953a3cc9aff8758a \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.2.0-snapshot-6413aae226.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.2.0-snapshot-6413aae226.jar.sha1 deleted file mode 100644 index a97c558f16c45..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.2.0-snapshot-6413aae226.jar.sha1 
+++ /dev/null @@ -1 +0,0 @@ -740e2a96d58fdf2299e5e9c3e156c33b1b2b7d60 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.2.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.2.0.jar.sha1 new file mode 100644 index 0000000000000..f6e4c6e0b923c --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.2.0.jar.sha1 @@ -0,0 +1 @@ +4f0feca14e6ac73b708a9ccd437478260a46bead \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.2.0-snapshot-6413aae226.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.2.0-snapshot-6413aae226.jar.sha1 deleted file mode 100644 index 7949cdcb19e7e..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.2.0-snapshot-6413aae226.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4654bbbfdd81356a07f382bb0f8cadf6c6ba81f6 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.2.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.2.0.jar.sha1 new file mode 100644 index 0000000000000..0699472550d4d --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.2.0.jar.sha1 @@ -0,0 +1 @@ +a87df79bb727bbe355dbcf367e4489fc1010343f \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.2.0-snapshot-6413aae226.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.2.0-snapshot-6413aae226.jar.sha1 deleted file mode 100644 index 135954405a9f8..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.2.0-snapshot-6413aae226.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d5dc5048018786e5b0719263533e74f4ba144ed3 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.2.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.2.0.jar.sha1 new file mode 100644 index 0000000000000..eb0fc640980b3 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.2.0.jar.sha1 @@ -0,0 +1 @@ +cb93b65fb1ddd218142ebc50857c56a61b3b578b \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.2.0-snapshot-6413aae226.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.2.0-snapshot-6413aae226.jar.sha1 deleted file mode 100644 index fdafa7fe20e71..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.2.0-snapshot-6413aae226.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8c541cc37972991a56153474679357a262c2026c \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.2.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.2.0.jar.sha1 new file mode 100644 index 0000000000000..13fcb479542bd --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.2.0.jar.sha1 @@ -0,0 +1 @@ +6db9b08863134ef1c080f30376693a55de3a372b \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.2.0-snapshot-6413aae226.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.2.0-snapshot-6413aae226.jar.sha1 deleted file mode 100644 index beb30ff476a6e..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.2.0-snapshot-6413aae226.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4ac60095d79c31625baa58f24ae5eec4ef23c9a4 \ No newline at end of file diff --git 
a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.2.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.2.0.jar.sha1 new file mode 100644 index 0000000000000..4afbba111c55a --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.2.0.jar.sha1 @@ -0,0 +1 @@ +b89c9f985cdcf3f563d6e66057b2fd6e22e75f77 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-analyzers-common-8.2.0-snapshot-6413aae226.jar.sha1 deleted file mode 100644 index 962a7447eeee6..0000000000000 --- a/server/licenses/lucene-analyzers-common-8.2.0-snapshot-6413aae226.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc386e4b342d56474f4220d3906fb73432be12ee \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-8.2.0.jar.sha1 b/server/licenses/lucene-analyzers-common-8.2.0.jar.sha1 new file mode 100644 index 0000000000000..e99f6769fb0e8 --- /dev/null +++ b/server/licenses/lucene-analyzers-common-8.2.0.jar.sha1 @@ -0,0 +1 @@ +8e8abc90572ed74b110c75b546c675153aecc570 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-backward-codecs-8.2.0-snapshot-6413aae226.jar.sha1 deleted file mode 100644 index a8cd7645a9b60..0000000000000 --- a/server/licenses/lucene-backward-codecs-8.2.0-snapshot-6413aae226.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -93b881f369fc1f71eaee7b3604885b9acf38e807 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.2.0.jar.sha1 b/server/licenses/lucene-backward-codecs-8.2.0.jar.sha1 new file mode 100644 index 0000000000000..279c896a6705b --- /dev/null +++ b/server/licenses/lucene-backward-codecs-8.2.0.jar.sha1 @@ -0,0 +1 @@ +91397b1e0dab4a66e9e58a82ab1690f0383aaced \ No newline at end of file diff --git a/server/licenses/lucene-core-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-core-8.2.0-snapshot-6413aae226.jar.sha1 deleted file mode 100644 index 8e703b4ec6b84..0000000000000 --- a/server/licenses/lucene-core-8.2.0-snapshot-6413aae226.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -79f8f65bf5a536b95a5e1074ba431544a0a73fcb \ No newline at end of file diff --git a/server/licenses/lucene-core-8.2.0.jar.sha1 b/server/licenses/lucene-core-8.2.0.jar.sha1 new file mode 100644 index 0000000000000..538ece1082382 --- /dev/null +++ b/server/licenses/lucene-core-8.2.0.jar.sha1 @@ -0,0 +1 @@ +f6da40436d3633de272810fae1e339c237adfcf6 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-grouping-8.2.0-snapshot-6413aae226.jar.sha1 deleted file mode 100644 index e5a8057ec84f9..0000000000000 --- a/server/licenses/lucene-grouping-8.2.0-snapshot-6413aae226.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d7e6a3723bf52c101bb09c39dc532fff7db74d89 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.2.0.jar.sha1 b/server/licenses/lucene-grouping-8.2.0.jar.sha1 new file mode 100644 index 0000000000000..7054df97d2b81 --- /dev/null +++ b/server/licenses/lucene-grouping-8.2.0.jar.sha1 @@ -0,0 +1 @@ +a457b6ae0b02a02c9fc7061a19289601554c320a \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-highlighter-8.2.0-snapshot-6413aae226.jar.sha1 deleted file mode 100644 index 450f408212bec..0000000000000 --- a/server/licenses/lucene-highlighter-8.2.0-snapshot-6413aae226.jar.sha1 +++ 
/dev/null @@ -1 +0,0 @@ -5618357f383674274fbc300213242acb298fedfb \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.2.0.jar.sha1 b/server/licenses/lucene-highlighter-8.2.0.jar.sha1 new file mode 100644 index 0000000000000..ac3b578156c51 --- /dev/null +++ b/server/licenses/lucene-highlighter-8.2.0.jar.sha1 @@ -0,0 +1 @@ +21bdc9d7e134c9e8bb2bab7a5c32f5ff08b345ec \ No newline at end of file diff --git a/server/licenses/lucene-join-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-join-8.2.0-snapshot-6413aae226.jar.sha1 deleted file mode 100644 index cc2161879b7bc..0000000000000 --- a/server/licenses/lucene-join-8.2.0-snapshot-6413aae226.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f09a43945578dba87eecda0b316cb980c64ced3c \ No newline at end of file diff --git a/server/licenses/lucene-join-8.2.0.jar.sha1 b/server/licenses/lucene-join-8.2.0.jar.sha1 new file mode 100644 index 0000000000000..cfd2ded2e913d --- /dev/null +++ b/server/licenses/lucene-join-8.2.0.jar.sha1 @@ -0,0 +1 @@ +6e1f359cb49868ec2482cb1af7f32b19ac70fcf3 \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-memory-8.2.0-snapshot-6413aae226.jar.sha1 deleted file mode 100644 index 0aa293c816f46..0000000000000 --- a/server/licenses/lucene-memory-8.2.0-snapshot-6413aae226.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e0c8338473d317024ab6d60a3b681eb0489aae80 \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.2.0.jar.sha1 b/server/licenses/lucene-memory-8.2.0.jar.sha1 new file mode 100644 index 0000000000000..5feab9e898ca8 --- /dev/null +++ b/server/licenses/lucene-memory-8.2.0.jar.sha1 @@ -0,0 +1 @@ +719c1c86f525d58a717eb6338552cd3aaa19d56c \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-misc-8.2.0-snapshot-6413aae226.jar.sha1 deleted file mode 100644 index 0e0a91bffe119..0000000000000 --- a/server/licenses/lucene-misc-8.2.0-snapshot-6413aae226.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8ab08f7a01e7109d64eb7b17d6099c51abe77b2f \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.2.0.jar.sha1 b/server/licenses/lucene-misc-8.2.0.jar.sha1 new file mode 100644 index 0000000000000..88c672cbed409 --- /dev/null +++ b/server/licenses/lucene-misc-8.2.0.jar.sha1 @@ -0,0 +1 @@ +539c353c1861df0ace480978429f48a4bccd29c4 \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-queries-8.2.0-snapshot-6413aae226.jar.sha1 deleted file mode 100644 index 95ddbb354cc99..0000000000000 --- a/server/licenses/lucene-queries-8.2.0-snapshot-6413aae226.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a8ad706739d679b1f7d076a2f70e3cfa794292cb \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.2.0.jar.sha1 b/server/licenses/lucene-queries-8.2.0.jar.sha1 new file mode 100644 index 0000000000000..2e3e0ddd3e999 --- /dev/null +++ b/server/licenses/lucene-queries-8.2.0.jar.sha1 @@ -0,0 +1 @@ +5da383678cb0a35a07ccb03487ba00cf184d1d71 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-queryparser-8.2.0-snapshot-6413aae226.jar.sha1 deleted file mode 100644 index 6b381afbf6264..0000000000000 --- a/server/licenses/lucene-queryparser-8.2.0-snapshot-6413aae226.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c688121186a33d0ac5283c24fb0b1dd18de1d1f5 \ No newline at end of file diff --git 
a/server/licenses/lucene-queryparser-8.2.0.jar.sha1 b/server/licenses/lucene-queryparser-8.2.0.jar.sha1 new file mode 100644 index 0000000000000..35fd1c4fa5920 --- /dev/null +++ b/server/licenses/lucene-queryparser-8.2.0.jar.sha1 @@ -0,0 +1 @@ +8925df7b104e78e308e236ff0740a064dd93cadd \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-sandbox-8.2.0-snapshot-6413aae226.jar.sha1 deleted file mode 100644 index c1fd694c80e3d..0000000000000 --- a/server/licenses/lucene-sandbox-8.2.0-snapshot-6413aae226.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7f7a0a7be83093e77775aaec3be63e59a537166e \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.2.0.jar.sha1 b/server/licenses/lucene-sandbox-8.2.0.jar.sha1 new file mode 100644 index 0000000000000..73bc68f4796b5 --- /dev/null +++ b/server/licenses/lucene-sandbox-8.2.0.jar.sha1 @@ -0,0 +1 @@ +f50931f1db40cdcc31e5044439d4e5522a23f6c1 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-spatial-8.2.0-snapshot-6413aae226.jar.sha1 deleted file mode 100644 index c85ac98032cb6..0000000000000 --- a/server/licenses/lucene-spatial-8.2.0-snapshot-6413aae226.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3cfe4a86fad519f1a78dfbdb8b1133550f7cb5d5 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.2.0.jar.sha1 b/server/licenses/lucene-spatial-8.2.0.jar.sha1 new file mode 100644 index 0000000000000..f475db434c2f0 --- /dev/null +++ b/server/licenses/lucene-spatial-8.2.0.jar.sha1 @@ -0,0 +1 @@ +8a9edbc075ae5fd6ee2265f0bb6d5847e78c8a96 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-spatial-extras-8.2.0-snapshot-6413aae226.jar.sha1 deleted file mode 100644 index fd1f769ec7856..0000000000000 --- a/server/licenses/lucene-spatial-extras-8.2.0-snapshot-6413aae226.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6710bc40dc4108fe12f9f56b3e23660c40f65df6 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.2.0.jar.sha1 b/server/licenses/lucene-spatial-extras-8.2.0.jar.sha1 new file mode 100644 index 0000000000000..994a4328c5001 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-8.2.0.jar.sha1 @@ -0,0 +1 @@ +1335a4a876a82dbbb79df8172133df66de06689f \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-spatial3d-8.2.0-snapshot-6413aae226.jar.sha1 deleted file mode 100644 index 14f72bd268d80..0000000000000 --- a/server/licenses/lucene-spatial3d-8.2.0-snapshot-6413aae226.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8769653d5fadddf0f376e152700b9578bddd74e7 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.2.0.jar.sha1 b/server/licenses/lucene-spatial3d-8.2.0.jar.sha1 new file mode 100644 index 0000000000000..e659f46f858d0 --- /dev/null +++ b/server/licenses/lucene-spatial3d-8.2.0.jar.sha1 @@ -0,0 +1 @@ +0bc0ee3f2d70cf66dc79a781b9edd6311f1f6a49 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.2.0-snapshot-6413aae226.jar.sha1 b/server/licenses/lucene-suggest-8.2.0-snapshot-6413aae226.jar.sha1 deleted file mode 100644 index e100acc8b2ca7..0000000000000 --- a/server/licenses/lucene-suggest-8.2.0-snapshot-6413aae226.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -96c17ce3b4c9e8c9b6a525a8204e7dd2ea18496c \ No newline at end of file diff --git 
a/server/licenses/lucene-suggest-8.2.0.jar.sha1 b/server/licenses/lucene-suggest-8.2.0.jar.sha1 new file mode 100644 index 0000000000000..9dd04618a7d44 --- /dev/null +++ b/server/licenses/lucene-suggest-8.2.0.jar.sha1 @@ -0,0 +1 @@ +334d627bda935dfb34e8e1c78d8f5a28b4be325a \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.2.0-snapshot-6413aae226.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.2.0-snapshot-6413aae226.jar.sha1 deleted file mode 100644 index 8e703b4ec6b84..0000000000000 --- a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.2.0-snapshot-6413aae226.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -79f8f65bf5a536b95a5e1074ba431544a0a73fcb \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.2.0.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.2.0.jar.sha1 new file mode 100644 index 0000000000000..538ece1082382 --- /dev/null +++ b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.2.0.jar.sha1 @@ -0,0 +1 @@ +f6da40436d3633de272810fae1e339c237adfcf6 \ No newline at end of file From be7bea995422118025213d77e004d79c915a93c4 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Fri, 26 Jul 2019 09:37:45 +0300 Subject: [PATCH 36/51] Document xpack.security.authc.saml.realm for Kibana (#44705) Since 7.3, it's possible to explicitly configure the SAML realm to be used in Kibana's configuration. This, in turn, eliminates the need to properly set the `xpack.security.public.*` settings in Kibana and largely simplifies the relevant documentation. This also changes `xpack.security.authProviders` to `xpack.security.authc.providers` as the former was deprecated in favor of the latter in Kibana 7.3 --- .../authentication/saml-guide.asciidoc | 44 +++++-------------- 1 file changed, 10 insertions(+), 34 deletions(-) diff --git a/x-pack/docs/en/security/authentication/saml-guide.asciidoc b/x-pack/docs/en/security/authentication/saml-guide.asciidoc index 6cbf8cbd57d4a..5bb02e6083d57 100644 --- a/x-pack/docs/en/security/authentication/saml-guide.asciidoc +++ b/x-pack/docs/en/security/authentication/saml-guide.asciidoc @@ -741,20 +741,25 @@ SAML authentication in {kib} is also subject to the `xpack.security.sessionTimeout` setting that is described in the {kib} security documentation, and you may wish to adjust this timeout to meet your local needs. -The two additional settings that are required for SAML support are shown below: +The three additional settings that are required for SAML support are shown below: [source, yaml] ------------------------------------------------------------ -xpack.security.authProviders: [saml] +xpack.security.authc.providers: [saml] +xpack.security.authc.saml.realm: saml1 server.xsrf.whitelist: [/api/security/v1/saml] ------------------------------------------------------------ The configuration values used in the example above are: -`xpack.security.authProviders`:: +`xpack.security.authc.providers`:: Set this to `[ saml ]` to instruct {kib} to use SAML SSO as the authentication method.
+`xpack.security.authc.saml.realm`:: +Set this to the name of the SAML realm that you have used in your <>, for instance: `saml1` + `server.xsrf.whitelist`:: {kib} has in-built protection against _Cross Site Request Forgery_ attacks which are designed to prevent the {kib} server from processing requests that @@ -763,47 +768,18 @@ In order to support SAML authentication messages that originate from your Identity Provider, we need to explicitly _whitelist_ the SAML authentication URL within {kib}, so that the {kib} server will not reject these external messages. -If your {kib} instance is behind a proxy, you may also need to add configuration -to tell {kib} how to form its public URL. This is needed because all SAML -messages are exchanged via the user's web browser, so {kib} needs to know what -URLs are used within the browser. In this case, the following settings should be -added to your `kibana.yml` configuration file: - -[source, yaml] ------------------------------------------------------------- -xpack.security.public: - protocol: https - hostname: kibana.proxy.com - port: 443 ------------------------------------------------------------- - -`xpack.security.public.protocol`:: -This is the protocol that the user's web browser uses to connect to the proxy. -Must be one of `http` or `https`. It is strongly recommended that you use the -`https` protocol for all access to {kib}. - -`xpack.security.public.hostname`:: -The fully qualified hostname that your users use to connect to the proxy server. - -`xpack.security.public.port`:: -The port number that your users use to connect to the proxy server (e.g. `80` -for `http` or `443` for `https`). - -These values must be aligned with the URLs used in the {es} configuration for -`sp.acs` and `sp.logout`. - [[saml-kibana-basic]] ==== Supporting SAML and basic authentication in {kib} The SAML support in {kib} is designed on the expectation that it will be the primary (or sole) authentication method for users of that {kib} instance. However, it is possible to support both SAML and Basic authentication within a single {kib} instance by setting `xpack.security.authc.providers` as per the example below: [source, yaml] ------------------------------------------------------------ -xpack.security.authProviders: [saml, basic] +xpack.security.authc.providers: [saml, basic] ------------------------------------------------------------ The order is important - this will _initiate_ SAML authentication for From 321c2b86270252cd3a973205e74276bcabd250cb Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Fri, 26 Jul 2019 09:34:36 +0200 Subject: [PATCH 37/51] Force Merge should reject requests with `only_expunge_deletes` and `max_num_segments` set (#44761) This commit changes the ForceMergeRequest.validate() method so that it no longer accepts requests where the parameters only_expunge_deletes and max_num_segments are both set at the same time. The motivation is that InternalEngine.forceMerge() just ignores the max. number of segments parameter when the only expunge parameter is set to true, leaving the user with the wrong impression that the max. number of segments has been applied. It also changes InternalEngine.forceMerge() so that it now throws an exception when both parameters are set, and modifies tests where needed. Because it changes the behavior of the REST API, I marked this as >breaking.
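For illustration, a minimal sketch of the newly-rejected combination, using the request setters exercised by the tests below (the index name is arbitrary; the message text mirrors the new validation error):

    ForceMergeRequest request = new ForceMergeRequest("test");
    request.onlyExpungeDeletes(true); // expunge deleted documents only...
    request.maxNumSegments(10);       // ...while also asking for a segment target
    ActionRequestValidationException e = request.validate();
    // e is now non-null and its validation errors contain "cannot set
    // only_expunge_deletes and max_num_segments at the same time"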
Closes #43102 --- .../IndicesClientDocumentationIT.java | 4 ++ docs/reference/migration/migrate_8_0.asciidoc | 2 + .../migration/migrate_8_0/indices.asciidoc | 11 ++++ .../test/indices.forcemerge/10_basic.yml | 21 ++++++++ .../indices/forcemerge/ForceMergeRequest.java | 13 +++++ .../index/engine/InternalEngine.java | 3 ++ .../forcemerge/ForceMergeRequestTests.java | 54 +++++++++++++++++++ .../index/engine/InternalEngineTests.java | 24 ++++++--- 8 files changed, 124 insertions(+), 8 deletions(-) create mode 100644 docs/reference/migration/migrate_8_0/indices.asciidoc create mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestTests.java diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index 6a01400e006a1..607a11590e52d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -1313,6 +1313,10 @@ public void testForceMergeIndex() throws Exception { request.onlyExpungeDeletes(true); // <1> // end::force-merge-request-only-expunge-deletes + // set only expunge deletes back to its default value + // as it is mutually exclusive with max. num. segments + request.onlyExpungeDeletes(ForceMergeRequest.Defaults.ONLY_EXPUNGE_DELETES); + // tag::force-merge-request-flush request.flush(true); // <1> // end::force-merge-request-flush diff --git a/docs/reference/migration/migrate_8_0.asciidoc b/docs/reference/migration/migrate_8_0.asciidoc index ff3f5030ed9fb..4f56b628caf1a 100644 --- a/docs/reference/migration/migrate_8_0.asciidoc +++ b/docs/reference/migration/migrate_8_0.asciidoc @@ -27,6 +27,7 @@ coming[8.0.0] * <> * <> * <> +* <> //NOTE: The notable-breaking-changes tagged regions are re-used in the //Installation and Upgrade Guide @@ -65,3 +66,4 @@ include::migrate_8_0/http.asciidoc[] include::migrate_8_0/reindex.asciidoc[] include::migrate_8_0/search.asciidoc[] include::migrate_8_0/settings.asciidoc[] +include::migrate_8_0/indices.asciidoc[] diff --git a/docs/reference/migration/migrate_8_0/indices.asciidoc b/docs/reference/migration/migrate_8_0/indices.asciidoc new file mode 100644 index 0000000000000..05b9a299b9ec1 --- /dev/null +++ b/docs/reference/migration/migrate_8_0/indices.asciidoc @@ -0,0 +1,11 @@ +[float] +[[breaking_80_indices_changes]] +=== Force Merge API changes + +Previously, the Force Merge API allowed the parameters `only_expunge_deletes` +and `max_num_segments` to be set to a non-default value at the same time, but +the `max_num_segments` was silently ignored when `only_expunge_deletes` was set +to `true`, leaving the false impression that it had been applied. + +The Force Merge API now rejects requests that have a `max_num_segments` greater +than or equal to 0 when `only_expunge_deletes` is set to `true`.
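A minimal sketch of a request that is now rejected, using the low-level REST client (the index name and the test-style `client()` context are illustrative; the parameters are the ones discussed above):

[source,java]
------------------------------------------------------------
// This combination now fails with a 400 action_request_validation_exception.
Request forceMerge = new Request("POST", "/test/_forcemerge");
forceMerge.addParameter("max_num_segments", "10");
forceMerge.addParameter("only_expunge_deletes", "true");
client().performRequest(forceMerge); // throws ResponseException (status 400)
------------------------------------------------------------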
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml index 6f1c6ea949665..0889effc3d509 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml @@ -8,3 +8,24 @@ indices.forcemerge: index: testing max_num_segments: 1 + +--- +"Force merge with incompatible only_expunge_deletes and max_num_segments values": + - skip: + version: " - 7.9.99" + reason: only_expunge_deletes and max_num_segments are mutually exclusive since 8.0 + + - do: + indices.create: + index: test + + - do: + catch: bad_request + indices.forcemerge: + index: test + max_num_segments: 10 + only_expunge_deletes: true + + - match: { status: 400 } + - match: { error.type: action_request_validation_exception } + - match: { error.reason: "Validation Failed: 1: cannot set only_expunge_deletes and max_num_segments at the same time, those two parameters are mutually exclusive;" } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java index b7fa9094540a7..bc810d4f6477d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java @@ -19,12 +19,15 @@ package org.elasticsearch.action.admin.indices.forcemerge; +import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.broadcast.BroadcastRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; +import static org.elasticsearch.action.ValidateActions.addValidationError; + /** * A request to force merging the segments of one or more indices. 
In order to * run a merge on all the indices, pass an empty array or {@code null} for the @@ -122,6 +125,16 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(flush); } + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationError = super.validate(); + if (onlyExpungeDeletes && maxNumSegments != Defaults.MAX_NUM_SEGMENTS) { + validationError = addValidationError("cannot set only_expunge_deletes and max_num_segments at the same time, those two " + + "parameters are mutually exclusive", validationError); + } + return validationError; + } + @Override public String toString() { return "ForceMergeRequest{" + diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index af0adfdedcf45..5ea51c57f90b9 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -1895,6 +1895,9 @@ final Map getVersionMap() { @Override public void forceMerge(final boolean flush, int maxNumSegments, boolean onlyExpungeDeletes, final boolean upgrade, final boolean upgradeOnlyAncientSegments) throws EngineException, IOException { + if (onlyExpungeDeletes && maxNumSegments >= 0) { + throw new IllegalArgumentException("only_expunge_deletes and max_num_segments are mutually exclusive"); + } /* * We do NOT acquire the readlock here since we are waiting on the merges to finish * that's fine since the IW.rollback should stop all the threads and trigger an IOException diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestTests.java new file mode 100644 index 0000000000000..f672a22aaf509 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestTests.java @@ -0,0 +1,54 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.action.admin.indices.forcemerge; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class ForceMergeRequestTests extends ESTestCase { + + public void testValidate() { + final boolean flush = randomBoolean(); + final boolean onlyExpungeDeletes = randomBoolean(); + final int maxNumSegments = randomIntBetween(ForceMergeRequest.Defaults.MAX_NUM_SEGMENTS, 100); + + final ForceMergeRequest request = new ForceMergeRequest(); + request.flush(flush); + request.onlyExpungeDeletes(onlyExpungeDeletes); + request.maxNumSegments(maxNumSegments); + + assertThat(request.flush(), equalTo(flush)); + assertThat(request.onlyExpungeDeletes(), equalTo(onlyExpungeDeletes)); + assertThat(request.maxNumSegments(), equalTo(maxNumSegments)); + + ActionRequestValidationException validation = request.validate(); + if (onlyExpungeDeletes && maxNumSegments != ForceMergeRequest.Defaults.MAX_NUM_SEGMENTS) { + assertThat(validation, notNullValue()); + assertThat(validation.validationErrors(), contains("cannot set only_expunge_deletes and max_num_segments at the " + + "same time, those two parameters are mutually exclusive")); + } else { + assertThat(validation, nullValue()); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 0df178f924e58..8b56c0181adfb 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -189,6 +189,7 @@ import static org.hamcrest.CoreMatchers.sameInstance; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; @@ -197,6 +198,7 @@ import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.isIn; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; @@ -1225,8 +1227,7 @@ public void testRenewSyncFlush() throws Exception { Engine.SyncedFlushResult.SUCCESS); assertEquals(3, engine.segments(false).size()); - engine.forceMerge(forceMergeFlushes, 1, false, - false, false); + engine.forceMerge(forceMergeFlushes, 1, false, false, false); if (forceMergeFlushes == false) { engine.refresh("make all segments visible"); assertEquals(4, engine.segments(false).size()); @@ -1471,7 +1472,7 @@ public void testForceMergeWithoutSoftDeletes() throws IOException { Engine.Index index = indexForDoc(doc); engine.delete(new Engine.Delete(index.type(), index.id(), index.uid(), primaryTerm.get())); //expunge deletes - engine.forceMerge(true, 10, true, false, false); + engine.forceMerge(true, -1, true, false, false); engine.refresh("test"); assertEquals(engine.segments(true).size(), 1); @@ -1752,8 +1753,7 @@ public void run() { engine.refresh("test"); indexed.countDown(); try { - engine.forceMerge(randomBoolean(), 1, false, randomBoolean(), - randomBoolean()); + 
engine.forceMerge(randomBoolean(), 1, false, randomBoolean(), randomBoolean()); } catch (IOException e) { return; } @@ -3162,8 +3162,7 @@ public void run() { try { switch (operation) { case "optimize": { - engine.forceMerge(true, 1, false, false, - false); + engine.forceMerge(true, 1, false, false, false); break; } case "refresh": { @@ -4364,7 +4363,16 @@ public void testRandomOperations() throws Exception { engine.flush(); } if (randomBoolean()) { - engine.forceMerge(randomBoolean(), between(1, 10), randomBoolean(), false, false); + boolean flush = randomBoolean(); + boolean onlyExpungeDeletes = randomBoolean(); + int maxNumSegments = randomIntBetween(-1, 10); + try { + engine.forceMerge(flush, maxNumSegments, onlyExpungeDeletes, false, false); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("only_expunge_deletes and max_num_segments are mutually exclusive")); + assertThat(onlyExpungeDeletes, is(true)); + assertThat(maxNumSegments, greaterThan(-1)); + } } } if (engine.engineConfig.getIndexSettings().isSoftDeleteEnabled()) { From 16a4aa56bdeddda0ba530e29a1e158a7c0236a0a Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 26 Jul 2019 17:06:10 +0900 Subject: [PATCH 38/51] Deprecate setting processors to more than available (#44889) Today the processors setting is permitted to be set to more than the number of processors available to the JVM. The processors setting directly sizes the number of threads in the various thread pools, with most of these sizes being a linear function in the number of processors. It doesn't make any sense to set processors very high as the overhead from context switching amongst all the threads will overwhelm any benefit, and changing the setting does not control how many physical CPU resources there are on which to schedule the additional threads. We have to draw a line somewhere and this commit deprecates setting processors to more than the number of available processors. This is the right place to draw the line given the linear growth as a function of processors in most of the thread pools, and that some are capped at the number of available processors already. --- .../common/util/concurrent/EsExecutors.java | 25 +++++++++++++--- .../test/InternalTestCluster.java | 2 +- .../elasticsearch/xpack/watcher/Watcher.java | 10 ++++--- .../xpack/watcher/WatcherPluginTests.java | 30 ++++++++----------- 4 files changed, 41 insertions(+), 26 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java index 7b304cd092a26..561a820d49078 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java @@ -19,8 +19,10 @@ package org.elasticsearch.common.util.concurrent; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -46,12 +48,27 @@ public class EsExecutors { + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(EsExecutors.class)); + /** - * Settings key to manually set the number of available processors. - * This is used to adjust thread pools sizes etc.
per node. + * Setting to manually set the number of available processors. This setting is used to adjust thread pool sizes per node. */ - public static final Setting PROCESSORS_SETTING = - Setting.intSetting("processors", Runtime.getRuntime().availableProcessors(), 1, Property.NodeScope); + public static final Setting PROCESSORS_SETTING = new Setting<>( + "processors", + s -> Integer.toString(Runtime.getRuntime().availableProcessors()), + s -> { + final int value = Setting.parseInt(s, 1, "processors"); + final int availableProcessors = Runtime.getRuntime().availableProcessors(); + if (value > availableProcessors) { + deprecationLogger.deprecatedAndMaybeLog( + "processors", + "setting processors to value [{}] which is more than available processors [{}] is deprecated", + value, + availableProcessors); + } + return value; + }, + Property.NodeScope); /** * Returns the number of available processors. Defaults to diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 4476753911913..35f113cd75444 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -450,7 +450,7 @@ private static Settings getRandomNodeSettings(long seed) { builder.put(SearchService.DEFAULT_KEEPALIVE_SETTING.getKey(), timeValueSeconds(100 + random.nextInt(5 * 60)).getStringRep()); } - builder.put(EsExecutors.PROCESSORS_SETTING.getKey(), 1 + random.nextInt(3)); + builder.put(EsExecutors.PROCESSORS_SETTING.getKey(), 1 + random.nextInt(Math.min(4, Runtime.getRuntime().availableProcessors()))); if (random.nextBoolean()) { if (random.nextBoolean()) { builder.put("indices.fielddata.cache.size", 1 + random.nextInt(1000), ByteSizeUnit.MB); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java index 73d491de725d0..3ce863a1aaa8a 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java @@ -515,11 +515,13 @@ public List> getExecutorBuilders(final Settings settings) { * @param settings The current settings * @return A number between 5 and the number of processors */ - static int getWatcherThreadPoolSize(Settings settings) { - boolean isDataNode = Node.NODE_DATA_SETTING.get(settings); + static int getWatcherThreadPoolSize(final Settings settings) { + return getWatcherThreadPoolSize(Node.NODE_DATA_SETTING.get(settings), EsExecutors.numberOfProcessors(settings)); + } + + static int getWatcherThreadPoolSize(final boolean isDataNode, final int numberOfProcessors) { if (isDataNode) { - int numberOfProcessors = EsExecutors.numberOfProcessors(settings); - long size = Math.max(Math.min(5 * numberOfProcessors, 50), numberOfProcessors); + final long size = Math.max(Math.min(5 * numberOfProcessors, 50), numberOfProcessors); return Math.toIntExact(size); } else { return 1; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java index c9e0ed8934359..5575915188351 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java +++ 
b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java @@ -86,23 +86,19 @@ public void testWatcherDisabledTests() throws Exception { public void testThreadPoolSize() { // old calculation was 5 * number of processors - assertThat(Watcher.getWatcherThreadPoolSize(Settings.builder().put("processors", 1).build()), is(5)); - assertThat(Watcher.getWatcherThreadPoolSize(Settings.builder().put("processors", 2).build()), is(10)); - assertThat(Watcher.getWatcherThreadPoolSize(Settings.builder().put("processors", 4).build()), is(20)); - assertThat(Watcher.getWatcherThreadPoolSize(Settings.builder().put("processors", 8).build()), is(40)); - assertThat(Watcher.getWatcherThreadPoolSize(Settings.builder().put("processors", 9).build()), is(45)); - assertThat(Watcher.getWatcherThreadPoolSize(Settings.builder().put("processors", 10).build()), is(50)); - assertThat(Watcher.getWatcherThreadPoolSize(Settings.builder().put("processors", 16).build()), is(50)); - assertThat(Watcher.getWatcherThreadPoolSize(Settings.builder().put("processors", 24).build()), is(50)); - assertThat(Watcher.getWatcherThreadPoolSize(Settings.builder().put("processors", 50).build()), is(50)); - assertThat(Watcher.getWatcherThreadPoolSize(Settings.builder().put("processors", 51).build()), is(51)); - assertThat(Watcher.getWatcherThreadPoolSize(Settings.builder().put("processors", 96).build()), is(96)); - - Settings noDataNodeSettings = Settings.builder() - .put("processors", scaledRandomIntBetween(1, 100)) - .put("node.data", false) - .build(); - assertThat(Watcher.getWatcherThreadPoolSize(noDataNodeSettings), is(1)); + assertThat(Watcher.getWatcherThreadPoolSize(true, 1), is(5)); + assertThat(Watcher.getWatcherThreadPoolSize(true, 2), is(10)); + assertThat(Watcher.getWatcherThreadPoolSize(true, 4), is(20)); + assertThat(Watcher.getWatcherThreadPoolSize(true, 8), is(40)); + assertThat(Watcher.getWatcherThreadPoolSize(true, 9), is(45)); + assertThat(Watcher.getWatcherThreadPoolSize(true, 10), is(50)); + assertThat(Watcher.getWatcherThreadPoolSize(true, 16), is(50)); + assertThat(Watcher.getWatcherThreadPoolSize(true, 24), is(50)); + assertThat(Watcher.getWatcherThreadPoolSize(true, 50), is(50)); + assertThat(Watcher.getWatcherThreadPoolSize(true, 51), is(51)); + assertThat(Watcher.getWatcherThreadPoolSize(true, 96), is(96)); + + assertThat(Watcher.getWatcherThreadPoolSize(false, scaledRandomIntBetween(1, 100)), is(1)); } public void testReload() { From e3997c67133bcc7523781bd2188da0b2c2200c6b Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Fri, 26 Jul 2019 10:12:59 +0200 Subject: [PATCH 39/51] Ensure cluster is stable in ShrinkIndexIT.testShrinkThenSplitWithFailedNode (#44860) The test ShrinkIndexIT.testShrinkThenSplitWithFailedNode sometimes fails because the resize operation is not acknowledged (see #44736). This resize operation creates a new index "splitagain" and it results in a cluster state update (TransportResizeAction uses MetaDataCreateIndexService.createIndex() to create the resized index). This cluster state update is expected to be acknowledged by all nodes (see IndexCreationTask.onAllNodesAcked()) but this is not always true: the data node that was just stopped in the test before executing the resize operation might still be considered as a "faulty" node (and not yet removed from the cluster nodes) by the FollowersChecker. The cluster state is then acked on all nodes but one, and it results in a non acknowledged resize operation. 
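In outline, the fix is to wait for the remaining nodes to converge on a smaller cluster before issuing the resize request. A sketch of the pattern (the exact change is in the diff below):

    final int nodeCount = cluster().size();
    internalCluster().stopRandomNode(InternalTestCluster.nameFilter(shrinkNode));
    // wait until every remaining node has removed the stopped node from its cluster state
    ensureStableCluster(nodeCount - 1);
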
This commit adds an ensureStableCluster() check after stopping the node in the test. The goal is to ensure that the data node has been correctly removed from the cluster and that all nodes are fully connected to each other before moving forward with the resize operation. Closes #44736 --- .../action/admin/indices/create/ShrinkIndexIT.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java index 582ab09a1f868..1ee344e326a00 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java @@ -580,7 +580,9 @@ public void testShrinkThenSplitWithFailedNode() throws Exception { .build()).setResizeType(ResizeType.SHRINK).get()); ensureGreen(); + final int nodeCount = cluster().size(); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(shrinkNode)); + ensureStableCluster(nodeCount - 1); // demonstrate that the index.routing.allocation.initial_recovery setting from the shrink doesn't carry over into the split index, // because this would cause the shrink to fail as the initial_recovery node is no longer present. From 84793476ba5eb2e3a3be55f25c8411c3cde778cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Fri, 26 Jul 2019 11:39:59 +0200 Subject: [PATCH 40/51] [DOCS] Amends data frame analytics resources, GET, and PUT API docs (#44806) This PR addresses the feedback in https://github.com/elastic/ml-team/issues/175#issuecomment-512215731. * Adds an example to `analyzed_fields` * Includes `source` and `dest` objects inline in the resource page * Lists `model_memory_limit` in the PUT API page * Amends the `analysis` section in the resource page * Removes Properties headings in subsections --- .../apis/dfanalyticsresources.asciidoc | 91 +++++++++---------- .../apis/get-dfanalytics-stats.asciidoc | 22 ++++- .../apis/get-dfanalytics.asciidoc | 39 +++++--- .../apis/put-dfanalytics.asciidoc | 18 +++- 4 files changed, 102 insertions(+), 68 deletions(-) diff --git a/docs/reference/ml/df-analytics/apis/dfanalyticsresources.asciidoc b/docs/reference/ml/df-analytics/apis/dfanalyticsresources.asciidoc index 86f3e15ed06f8..db5e932ab1165 100644 --- a/docs/reference/ml/df-analytics/apis/dfanalyticsresources.asciidoc +++ b/docs/reference/ml/df-analytics/apis/dfanalyticsresources.asciidoc @@ -18,10 +18,36 @@ (object) You can specify both `includes` and/or `excludes` patterns. If `analyzed_fields` is not set, only the relevant fields will be included. For example all the numeric fields for {oldetection}. + +[source,js] +-------------------------------------------------- +PUT _ml/data_frame/analytics/loganalytics +{ + "source": { + "index": "logdata" + }, + "dest": { + "index": "logdata_out" + }, + "analysis": { + "outlier_detection": { + } + }, + "analyzed_fields": { + "includes": [ "request.bytes", "response.counts.error" ], + "excludes": [ "source.geo" ] + } +} -------------------------------------------------- +// CONSOLE +// TEST[setup:setup_logdata] `dest`:: - (object) The destination configuration of the analysis. For more information, - see <>. + (object) The destination configuration of the analysis. The `index` property + (string) is the name of the index in which to store the results of the + {dfanalytics-job}.
The `results_field` (string) property defines the name of + the field in which to store the results of the analysis. The default value is + `ml`. `id`:: (string) The unique identifier for the {dfanalytics-job}. This identifier can @@ -38,25 +64,29 @@ that setting. For more information, see <>. `source`:: - (object) The source configuration, consisting of `index` and optionally a - `query`. For more information, see <>. + (object) The source configuration, consisting of `index` (array), which is an + array of index names on which to perform the analysis. It can be a single + index or index pattern as well as an array of indices or patterns. Optionally, + `source` can have a `query` (object) property, which uses the {es} query + domain-specific language (DSL). This value corresponds to the query object in + an {es} search POST body. All the options that are supported by {es} can be + used, as this object is passed verbatim to {es}. By default, this property has + the following value: `{"match_all": {}}`. [[dfanalytics-types]] ==== Analysis objects {dfanalytics-cap} resources contain `analysis` objects. For example, when you -create a {dfanalytics-job}, you must define the type of analysis it performs. +create a {dfanalytics-job}, you must define the type of analysis it performs. +Currently, `outlier_detection` is the only available type of analysis; however, +other types will be added, for example `regression`. [discrete] [[oldetection-resources]] -===== {oldetection-cap} configuration objects +==== {oldetection-cap} configuration objects An {oldetection} configuration object has the following properties: -[discrete] -[[oldetection-properties]] -==== {api-definitions-title} - `n_neighbors`:: (integer) Defines the value for how many nearest neighbors each method of {oldetection} will use to calculate its {olscore}. When the value is @@ -65,44 +95,11 @@ An {oldetection} configuration object has the following properties: `method`:: (string) Sets the method that {oldetection} uses. If the method is not set {oldetection} uses an ensemble of different methods and normalises and - combines their individual {olscores} to obtain the overall {olscore}. - Available methods are `lof`, `ldof`, `distance_kth_nn`, `distance_knn`. + combines their individual {olscores} to obtain the overall {olscore}. We + recommend using the ensemble method. Available methods are `lof`, `ldof`, + `distance_kth_nn`, `distance_knn`. `feature_influence_threshold`:: (double) The minimum {olscore} that a document needs to have in order to calculate its {fiscore}. - Value range: 0-1 (`0.1` by default). - -[[dfanalytics-dest-resources]] -==== Dest configuration objects - -{dfanalytics-cap} resources contain `dest` objects. For example, when you -create a {dfanalytics-job}, you must define its destination. - -[discrete] -[[dfanalytics-dest-properties]] -==== {api-definitions-title} - -`index`:: - (string) The name of the index in which to store the results of the - {dfanalytics-job}. - -`results_field`:: - (string) The name of the field in which to store the results of the analysis. - The default value is `ml`. - -[[dfanalytics-source-resources]] -==== Source configuration objects - -The `source` configuration object has the following properties: - -`index`:: - (array) An array of index names on which to perform the analysis. It can be a - single index or index pattern as well as an array of indices or patterns. - -`query`:: - (object) The {es} query domain-specific language (DSL).
This value - corresponds to the query object in an {es} search POST body. All the - options that are supported by {es} can be used, as this object is - passed verbatim to {es}. By default, this property has the following - value: `{"match_all": {}}`. \ No newline at end of file + Value range: 0-1 (`0.1` by default). \ No newline at end of file diff --git a/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc b/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc index 40a59a7e6b7c6..018d53a2c5e89 100644 --- a/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc +++ b/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc @@ -43,9 +43,18 @@ information, see {stack-ov}/security-privileges.html[Security privileges] and ==== {api-query-parms-title} `allow_no_match`:: - (Optional, boolean) If `false` and the `data_frame_analytics_id` does not - match any {dfanalytics-job} an error will be returned. The default value is - `true`. + (Optional, boolean) Specifies what to do when the request: ++ +-- +* Contains wildcard expressions and there are no {dfanalytics-jobs} that match. +* Contains the `_all` string or no identifiers and there are no matches. +* Contains wildcard expressions and there are only partial matches. + +The default value is `true`, which returns an empty `data_frame_analytics` array +when there are no matches and the subset of results when there are partial +matches. If this parameter is `false`, the request returns a `404` status code +when there are no matches or only partial matches. +-- `from`:: (Optional, integer) Skips the specified number of {dfanalytics-jobs}. The @@ -64,6 +73,13 @@ The API returns the following information: (array) An array of statistics objects for {dfanalytics-jobs}, which are sorted by the `id` value in ascending order. +[[ml-get-dfanalytics-stats-response-codes]] +==== {api-response-codes-title} + +`404` (Missing resources):: + If `allow_no_match` is `false`, this code indicates that there are no + resources that match the request or only partial matches for the request. + [[ml-get-dfanalytics-stats-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc index 12f603d94f7dd..09380a340310b 100644 --- a/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc @@ -33,10 +33,7 @@ information, see {stack-ov}/security-privileges.html[Security privileges] and ==== {api-description-title} You can get information for multiple {dfanalytics-jobs} in a single API request -by using a comma-separated list of {dfanalytics-jobs} or a wildcard expression. -You can get information for all {dfanalytics-jobs} by using _all, by specifying -`*` as the ``, or by omitting the -``. +by using a comma-separated list of {dfanalytics-jobs} or a wildcard expression. [[ml-get-dfanalytics-path-params]] ==== {api-path-parms-title} @@ -44,27 +41,34 @@ You can get information for all {dfanalytics-jobs} by using _all, by specifying ``:: (Optional, string) Identifier for the {dfanalytics-job}. If you do not specify one of these options, the API returns information for the first hundred - {dfanalytics-jobs}. - -`allow_no_match` (Optional):: - (boolean) If `false` and the `data_frame_analytics_id` does not match any - {dfanalytics-job} an error will be returned. The default value is `true`. + {dfanalytics-jobs}. 
You can get information for all {dfanalytics-jobs} by + using _all, by specifying `*` as the ``, or by + omitting the ``. [[ml-get-dfanalytics-query-params]] ==== {api-query-parms-title} `allow_no_match`:: - (Optional, boolean) If `false` and the `data_frame_analytics_id` does not - match any {dfanalytics-job} an error will be returned. The default value is - `true`. + (Optional, boolean) Specifies what to do when the request: ++ +-- +* Contains wildcard expressions and there are no {dfanalytics-jobs} that match. +* Contains the `_all` string or no identifiers and there are no matches. +* Contains wildcard expressions and there are only partial matches. + +The default value is `true`, which returns an empty `data_frame_analytics` array +when there are no matches and the subset of results when there are partial +matches. If this parameter is `false`, the request returns a `404` status code +when there are no matches or only partial matches. +-- `from`:: (Optional, integer) Skips the specified number of {dfanalytics-jobs}. The default value is `0`. `size`:: - (Optional, integer) Specifies the maximum number of {dfanalytics-jobs} to obtain. The - default value is `100`. + (Optional, integer) Specifies the maximum number of {dfanalytics-jobs} to + obtain. The default value is `100`. [[ml-get-dfanalytics-results]] ==== {api-response-body-title} @@ -73,6 +77,13 @@ You can get information for all {dfanalytics-jobs} by using _all, by specifying (array) An array of {dfanalytics-job} resources. For more information, see <>. +[[ml-get-dfanalytics-response-codes]] +==== {api-response-codes-title} + +`404` (Missing resources):: + If `allow_no_match` is `false`, this code indicates that there are no + resources that match the request or only partial matches for the request. + [[ml-get-dfanalytics-example]] ==== {api-examples-title} diff --git a/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc index 72b3a37f743ee..e89711d23c2eb 100644 --- a/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc @@ -67,12 +67,22 @@ and mappings. example, all the numeric fields for {oldetection}. `dest`:: - (Required, object) The destination configuration, consisting of `index` and optionally - `results_field` (`ml` by default). See <>. + (Required, object) The destination configuration, consisting of `index` and + optionally `results_field` (`ml` by default). See + <>. + +`model_memory_limit`:: + (Optional, string) The approximate maximum amount of memory resources that are + permitted for analytical processing. The default value for {dfanalytics-jobs} + is `1gb`. If your `elasticsearch.yml` file contains an + `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to + create {dfanalytics-jobs} that have `model_memory_limit` values greater than + that setting. For more information, see <>. `source`:: - (Required, object) The source configuration, consisting of `index` and optionally a - `query`. See <>. + (Required, object) The source configuration, consisting of `index` and + optionally a `query`. See + <>. [[ml-put-dfanalytics-example]] ==== {api-examples-title} From 0f03de595696f0e6f0a2d2390d1dda41e3b8f721 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Przemys=C5=82aw=20Witek?= Date: Fri, 26 Jul 2019 12:19:13 +0200 Subject: [PATCH 41/51] Implement exponential average search time per hour statistics. 
(#44683) --- .../ml/datafeed/DatafeedTimingStats.java | 33 ++- .../client/ml/job/process/TimingStats.java | 26 ++- .../ml/datafeed/DatafeedTimingStatsTests.java | 31 +-- .../ml/job/process/TimingStatsTests.java | 28 +-- .../core/ml/datafeed/DatafeedTimingStats.java | 86 +++++-- .../persistence/ElasticsearchMappings.java | 17 ++ .../process/autodetect/state/TimingStats.java | 64 +++++- .../ml/job/results/ReservedFieldNames.java | 7 + .../ExponentialAverageCalculationContext.java | 212 ++++++++++++++++++ .../GetDatafeedStatsActionResponseTests.java | 7 +- .../ml/datafeed/DatafeedTimingStatsTests.java | 80 ++++--- .../autodetect/state/TimingStatsTests.java | 62 ++--- ...nentialAverageCalculationContextTests.java | 125 +++++++++++ .../datafeed/DatafeedTimingStatsReporter.java | 8 +- .../job/persistence/TimingStatsReporter.java | 6 +- .../output/AutodetectResultProcessor.java | 2 +- .../DatafeedTimingStatsReporterTests.java | 92 +++++--- .../persistence/JobResultsPersisterTests.java | 22 +- .../persistence/JobResultsProviderTests.java | 42 +++- .../persistence/TimingStatsReporterTests.java | 112 ++++++--- .../AutodetectResultProcessorTests.java | 4 +- .../params/AutodetectParamsTests.java | 11 +- .../xpack/ml/job/results/BucketTests.java | 4 +- .../ml/JobStatsMonitoringDocTests.java | 7 +- 24 files changed, 859 insertions(+), 229 deletions(-) create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ExponentialAverageCalculationContext.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/ExponentialAverageCalculationContextTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedTimingStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedTimingStats.java index 9f9215e5046fe..4e0c67ffd0f89 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedTimingStats.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedTimingStats.java @@ -39,6 +39,7 @@ public class DatafeedTimingStats implements ToXContentObject { public static final ParseField BUCKET_COUNT = new ParseField("bucket_count"); public static final ParseField TOTAL_SEARCH_TIME_MS = new ParseField("total_search_time_ms"); public static final ParseField AVG_SEARCH_TIME_PER_BUCKET_MS = new ParseField("average_search_time_per_bucket_ms"); + public static final ParseField EXPONENTIAL_AVG_SEARCH_TIME_PER_HOUR_MS = new ParseField("exponential_average_search_time_per_hour_ms"); public static final ParseField TYPE = new ParseField("datafeed_timing_stats"); @@ -55,18 +56,21 @@ private static ConstructingObjectParser createParser( Long bucketCount = (Long) args[2]; Double totalSearchTimeMs = (Double) args[3]; Double avgSearchTimePerBucketMs = (Double) args[4]; + Double exponentialAvgSearchTimePerHourMs = (Double) args[5]; return new DatafeedTimingStats( jobId, getOrDefault(searchCount, 0L), getOrDefault(bucketCount, 0L), getOrDefault(totalSearchTimeMs, 0.0), - avgSearchTimePerBucketMs); + avgSearchTimePerBucketMs, + exponentialAvgSearchTimePerHourMs); }); parser.declareString(constructorArg(), JOB_ID); parser.declareLong(optionalConstructorArg(), SEARCH_COUNT); parser.declareLong(optionalConstructorArg(), BUCKET_COUNT); parser.declareDouble(optionalConstructorArg(), TOTAL_SEARCH_TIME_MS); parser.declareDouble(optionalConstructorArg(), AVG_SEARCH_TIME_PER_BUCKET_MS); + parser.declareDouble(optionalConstructorArg(), 
EXPONENTIAL_AVG_SEARCH_TIME_PER_HOUR_MS); return parser; } @@ -75,14 +79,21 @@ private static ConstructingObjectParser createParser( private long bucketCount; private double totalSearchTimeMs; private Double avgSearchTimePerBucketMs; + private Double exponentialAvgSearchTimePerHourMs; public DatafeedTimingStats( - String jobId, long searchCount, long bucketCount, double totalSearchTimeMs, @Nullable Double avgSearchTimePerBucketMs) { + String jobId, + long searchCount, + long bucketCount, + double totalSearchTimeMs, + @Nullable Double avgSearchTimePerBucketMs, + @Nullable Double exponentialAvgSearchTimePerHourMs) { this.jobId = Objects.requireNonNull(jobId); this.searchCount = searchCount; this.bucketCount = bucketCount; this.totalSearchTimeMs = totalSearchTimeMs; this.avgSearchTimePerBucketMs = avgSearchTimePerBucketMs; + this.exponentialAvgSearchTimePerHourMs = exponentialAvgSearchTimePerHourMs; } public String getJobId() { @@ -105,6 +116,10 @@ public Double getAvgSearchTimePerBucketMs() { return avgSearchTimePerBucketMs; } + public Double getExponentialAvgSearchTimePerHourMs() { + return exponentialAvgSearchTimePerHourMs; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { builder.startObject(); @@ -115,6 +130,9 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par if (avgSearchTimePerBucketMs != null) { builder.field(AVG_SEARCH_TIME_PER_BUCKET_MS.getPreferredName(), avgSearchTimePerBucketMs); } + if (exponentialAvgSearchTimePerHourMs != null) { + builder.field(EXPONENTIAL_AVG_SEARCH_TIME_PER_HOUR_MS.getPreferredName(), exponentialAvgSearchTimePerHourMs); + } builder.endObject(); return builder; } @@ -133,12 +151,19 @@ public boolean equals(Object obj) { && this.searchCount == other.searchCount && this.bucketCount == other.bucketCount && this.totalSearchTimeMs == other.totalSearchTimeMs - && Objects.equals(this.avgSearchTimePerBucketMs, other.avgSearchTimePerBucketMs); + && Objects.equals(this.avgSearchTimePerBucketMs, other.avgSearchTimePerBucketMs) + && Objects.equals(this.exponentialAvgSearchTimePerHourMs, other.exponentialAvgSearchTimePerHourMs); } @Override public int hashCode() { - return Objects.hash(jobId, searchCount, bucketCount, totalSearchTimeMs, avgSearchTimePerBucketMs); + return Objects.hash( + jobId, + searchCount, + bucketCount, + totalSearchTimeMs, + avgSearchTimePerBucketMs, + exponentialAvgSearchTimePerHourMs); } @Override diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/TimingStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/TimingStats.java index 9493270c4b936..7e722ddc8273d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/TimingStats.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/TimingStats.java @@ -45,6 +45,8 @@ public class TimingStats implements ToXContentObject { public static final ParseField AVG_BUCKET_PROCESSING_TIME_MS = new ParseField("average_bucket_processing_time_ms"); public static final ParseField EXPONENTIAL_AVG_BUCKET_PROCESSING_TIME_MS = new ParseField("exponential_average_bucket_processing_time_ms"); + public static final ParseField EXPONENTIAL_AVG_BUCKET_PROCESSING_TIME_PER_HOUR_MS = + new ParseField("exponential_average_bucket_processing_time_per_hour_ms"); public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( @@ -58,6 +60,7 @@ public class TimingStats 
implements ToXContentObject { Double maxBucketProcessingTimeMs = (Double) args[4]; Double avgBucketProcessingTimeMs = (Double) args[5]; Double exponentialAvgBucketProcessingTimeMs = (Double) args[6]; + Double exponentialAvgBucketProcessingTimePerHourMs = (Double) args[7]; return new TimingStats( jobId, getOrDefault(bucketCount, 0L), @@ -65,7 +68,8 @@ public class TimingStats implements ToXContentObject { minBucketProcessingTimeMs, maxBucketProcessingTimeMs, avgBucketProcessingTimeMs, - exponentialAvgBucketProcessingTimeMs); + exponentialAvgBucketProcessingTimeMs, + exponentialAvgBucketProcessingTimePerHourMs); }); static { @@ -76,6 +80,7 @@ public class TimingStats implements ToXContentObject { PARSER.declareDouble(optionalConstructorArg(), MAX_BUCKET_PROCESSING_TIME_MS); PARSER.declareDouble(optionalConstructorArg(), AVG_BUCKET_PROCESSING_TIME_MS); PARSER.declareDouble(optionalConstructorArg(), EXPONENTIAL_AVG_BUCKET_PROCESSING_TIME_MS); + PARSER.declareDouble(optionalConstructorArg(), EXPONENTIAL_AVG_BUCKET_PROCESSING_TIME_PER_HOUR_MS); } private final String jobId; @@ -85,6 +90,7 @@ public class TimingStats implements ToXContentObject { private Double maxBucketProcessingTimeMs; private Double avgBucketProcessingTimeMs; private Double exponentialAvgBucketProcessingTimeMs; + private Double exponentialAvgBucketProcessingTimePerHourMs; public TimingStats( String jobId, @@ -93,7 +99,8 @@ public TimingStats( @Nullable Double minBucketProcessingTimeMs, @Nullable Double maxBucketProcessingTimeMs, @Nullable Double avgBucketProcessingTimeMs, - @Nullable Double exponentialAvgBucketProcessingTimeMs) { + @Nullable Double exponentialAvgBucketProcessingTimeMs, + @Nullable Double exponentialAvgBucketProcessingTimePerHourMs) { this.jobId = jobId; this.bucketCount = bucketCount; this.totalBucketProcessingTimeMs = totalBucketProcessingTimeMs; @@ -101,6 +108,7 @@ public TimingStats( this.maxBucketProcessingTimeMs = maxBucketProcessingTimeMs; this.avgBucketProcessingTimeMs = avgBucketProcessingTimeMs; this.exponentialAvgBucketProcessingTimeMs = exponentialAvgBucketProcessingTimeMs; + this.exponentialAvgBucketProcessingTimePerHourMs = exponentialAvgBucketProcessingTimePerHourMs; } public String getJobId() { @@ -131,6 +139,10 @@ public Double getExponentialAvgBucketProcessingTimeMs() { return exponentialAvgBucketProcessingTimeMs; } + public Double getExponentialAvgBucketProcessingTimePerHourMs() { + return exponentialAvgBucketProcessingTimePerHourMs; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { builder.startObject(); @@ -149,6 +161,10 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par if (exponentialAvgBucketProcessingTimeMs != null) { builder.field(EXPONENTIAL_AVG_BUCKET_PROCESSING_TIME_MS.getPreferredName(), exponentialAvgBucketProcessingTimeMs); } + if (exponentialAvgBucketProcessingTimePerHourMs != null) { + builder.field( + EXPONENTIAL_AVG_BUCKET_PROCESSING_TIME_PER_HOUR_MS.getPreferredName(), exponentialAvgBucketProcessingTimePerHourMs); + } builder.endObject(); return builder; } @@ -164,7 +180,8 @@ public boolean equals(Object o) { && Objects.equals(this.minBucketProcessingTimeMs, that.minBucketProcessingTimeMs) && Objects.equals(this.maxBucketProcessingTimeMs, that.maxBucketProcessingTimeMs) && Objects.equals(this.avgBucketProcessingTimeMs, that.avgBucketProcessingTimeMs) - && Objects.equals(this.exponentialAvgBucketProcessingTimeMs, that.exponentialAvgBucketProcessingTimeMs); + && 
Objects.equals(this.exponentialAvgBucketProcessingTimeMs, that.exponentialAvgBucketProcessingTimeMs) + && Objects.equals(this.exponentialAvgBucketProcessingTimePerHourMs, that.exponentialAvgBucketProcessingTimePerHourMs); } @Override @@ -176,7 +193,8 @@ public int hashCode() { minBucketProcessingTimeMs, maxBucketProcessingTimeMs, avgBucketProcessingTimeMs, - exponentialAvgBucketProcessingTimeMs); + exponentialAvgBucketProcessingTimeMs, + exponentialAvgBucketProcessingTimePerHourMs); } @Override diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedTimingStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedTimingStatsTests.java index cde92b78f6c16..bf3c9581890c3 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedTimingStatsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedTimingStatsTests.java @@ -35,7 +35,12 @@ public class DatafeedTimingStatsTests extends AbstractXContentTestCase createParser( Long searchCount = (Long) args[1]; Long bucketCount = (Long) args[2]; Double totalSearchTimeMs = (Double) args[3]; + ExponentialAverageCalculationContext exponentialAvgCalculationContext = (ExponentialAverageCalculationContext) args[4]; return new DatafeedTimingStats( - jobId, getOrDefault(searchCount, 0L), getOrDefault(bucketCount, 0L), getOrDefault(totalSearchTimeMs, 0.0)); + jobId, + getOrDefault(searchCount, 0L), + getOrDefault(bucketCount, 0L), + getOrDefault(totalSearchTimeMs, 0.0), + getOrDefault(exponentialAvgCalculationContext, new ExponentialAverageCalculationContext())); }); parser.declareString(constructorArg(), JOB_ID); parser.declareLong(optionalConstructorArg(), SEARCH_COUNT); parser.declareLong(optionalConstructorArg(), BUCKET_COUNT); parser.declareDouble(optionalConstructorArg(), TOTAL_SEARCH_TIME_MS); + parser.declareObject(optionalConstructorArg(), ExponentialAverageCalculationContext.PARSER, EXPONENTIAL_AVG_CALCULATION_CONTEXT); return parser; } @@ -64,27 +74,40 @@ public static String documentId(String jobId) { private long searchCount; private long bucketCount; private double totalSearchTimeMs; - - public DatafeedTimingStats(String jobId, long searchCount, long bucketCount, double totalSearchTimeMs) { + private final ExponentialAverageCalculationContext exponentialAvgCalculationContext; + + public DatafeedTimingStats( + String jobId, + long searchCount, + long bucketCount, + double totalSearchTimeMs, + ExponentialAverageCalculationContext exponentialAvgCalculationContext) { this.jobId = Objects.requireNonNull(jobId); this.searchCount = searchCount; this.bucketCount = bucketCount; this.totalSearchTimeMs = totalSearchTimeMs; + this.exponentialAvgCalculationContext = Objects.requireNonNull(exponentialAvgCalculationContext); } public DatafeedTimingStats(String jobId) { - this(jobId, 0, 0, 0.0); + this(jobId, 0, 0, 0.0, new ExponentialAverageCalculationContext()); } public DatafeedTimingStats(StreamInput in) throws IOException { - jobId = in.readString(); - searchCount = in.readLong(); - bucketCount = in.readLong(); - totalSearchTimeMs = in.readDouble(); + this.jobId = in.readString(); + this.searchCount = in.readLong(); + this.bucketCount = in.readLong(); + this.totalSearchTimeMs = in.readDouble(); + this.exponentialAvgCalculationContext = in.readOptionalWriteable(ExponentialAverageCalculationContext::new); } public DatafeedTimingStats(DatafeedTimingStats other) { - this(other.jobId, other.searchCount, 
other.bucketCount, other.totalSearchTimeMs); + this( + other.jobId, + other.searchCount, + other.bucketCount, + other.totalSearchTimeMs, + new ExponentialAverageCalculationContext(other.exponentialAvgCalculationContext)); } public String getJobId() { @@ -104,26 +127,40 @@ public double getTotalSearchTimeMs() { } public Double getAvgSearchTimePerBucketMs() { - return bucketCount > 0 - ? totalSearchTimeMs / bucketCount - : null; + if (bucketCount == 0) return null; + return totalSearchTimeMs / bucketCount; + } + + public Double getExponentialAvgSearchTimePerHourMs() { + return exponentialAvgCalculationContext.getCurrentExponentialAverageMs(); + } + + // Visible for testing + ExponentialAverageCalculationContext getExponentialAvgCalculationContext() { + return exponentialAvgCalculationContext; } - public void incrementTotalSearchTimeMs(double searchTimeMs) { + public void incrementSearchTimeMs(double searchTimeMs) { this.searchCount++; this.totalSearchTimeMs += searchTimeMs; + this.exponentialAvgCalculationContext.increment(searchTimeMs); } public void incrementBucketCount(long bucketCount) { this.bucketCount += bucketCount; } + public void setLatestRecordTimestamp(Instant latestRecordTimestamp) { + this.exponentialAvgCalculationContext.setLatestTimestamp(latestRecordTimestamp); + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(jobId); out.writeLong(searchCount); out.writeLong(bucketCount); out.writeDouble(totalSearchTimeMs); + out.writeOptionalWriteable(exponentialAvgCalculationContext); } @Override @@ -137,11 +174,18 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par builder.field(BUCKET_COUNT.getPreferredName(), bucketCount); builder.field(TOTAL_SEARCH_TIME_MS.getPreferredName(), totalSearchTimeMs); if (params.paramAsBoolean(ToXContentParams.INCLUDE_CALCULATED_FIELDS, false)) { - Double avgSearchTimePerBucket = getAvgSearchTimePerBucketMs(); - if (avgSearchTimePerBucket != null) { - builder.field(AVG_SEARCH_TIME_PER_BUCKET_MS.getPreferredName(), getAvgSearchTimePerBucketMs()); + Double avgSearchTimePerBucketMs = getAvgSearchTimePerBucketMs(); + if (avgSearchTimePerBucketMs != null) { + builder.field(AVG_SEARCH_TIME_PER_BUCKET_MS.getPreferredName(), avgSearchTimePerBucketMs); + } + Double expAvgSearchTimePerHourMs = getExponentialAvgSearchTimePerHourMs(); + if (expAvgSearchTimePerHourMs != null) { + builder.field(EXPONENTIAL_AVG_SEARCH_TIME_PER_HOUR_MS.getPreferredName(), expAvgSearchTimePerHourMs); } } + if (params.paramAsBoolean(ToXContentParams.FOR_INTERNAL_STORAGE, false)) { + builder.field(EXPONENTIAL_AVG_CALCULATION_CONTEXT.getPreferredName(), exponentialAvgCalculationContext); + } builder.endObject(); return builder; } @@ -159,12 +203,18 @@ public boolean equals(Object obj) { return Objects.equals(this.jobId, other.jobId) && this.searchCount == other.searchCount && this.bucketCount == other.bucketCount - && this.totalSearchTimeMs == other.totalSearchTimeMs; + && this.totalSearchTimeMs == other.totalSearchTimeMs + && Objects.equals(this.exponentialAvgCalculationContext, other.exponentialAvgCalculationContext); } @Override public int hashCode() { - return Objects.hash(jobId, searchCount, bucketCount, totalSearchTimeMs); + return Objects.hash( + jobId, + searchCount, + bucketCount, + totalSearchTimeMs, + exponentialAvgCalculationContext); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java index 6981772066b96..baf655a280d67 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java @@ -58,6 +58,7 @@ import org.elasticsearch.xpack.core.ml.job.results.ReservedFieldNames; import org.elasticsearch.xpack.core.ml.job.results.Result; import org.elasticsearch.xpack.core.ml.notifications.AuditMessage; +import org.elasticsearch.xpack.core.ml.utils.ExponentialAverageCalculationContext; import java.io.IOException; import java.util.ArrayList; @@ -931,12 +932,27 @@ private static void addTimingStatsExceptBucketCountMapping(XContentBuilder build .endObject() .startObject(TimingStats.EXPONENTIAL_AVG_BUCKET_PROCESSING_TIME_MS.getPreferredName()) .field(TYPE, DOUBLE) + .endObject() + .startObject(TimingStats.EXPONENTIAL_AVG_CALCULATION_CONTEXT.getPreferredName()) + .startObject(PROPERTIES) + .startObject(ExponentialAverageCalculationContext.INCREMENTAL_METRIC_VALUE_MS.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .startObject(ExponentialAverageCalculationContext.LATEST_TIMESTAMP.getPreferredName()) + .field(TYPE, DATE) + .endObject() + .startObject(ExponentialAverageCalculationContext.PREVIOUS_EXPONENTIAL_AVERAGE_MS.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .endObject() .endObject(); } /** * {@link DatafeedTimingStats} mapping. * Does not include mapping for BUCKET_COUNT as this mapping is added by {@link #addDataCountsMapping} method. + * Does not include mapping for EXPONENTIAL_AVG_CALCULATION_CONTEXT as this mapping is added by + * {@link #addTimingStatsExceptBucketCountMapping} method. 
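+ * (That method maps {@code incremental_metric_value_ms} and {@code previous_exponential_average_ms} + * as {@code double} fields and {@code latest_timestamp} as a {@code date} field.)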
* * @throws IOException On builder write error */ @@ -948,6 +964,7 @@ private static void addDatafeedTimingStats(XContentBuilder builder) throws IOExc // re-used: BUCKET_COUNT .startObject(DatafeedTimingStats.TOTAL_SEARCH_TIME_MS.getPreferredName()) .field(TYPE, DOUBLE) + // re-used: EXPONENTIAL_AVG_CALCULATION_CONTEXT .endObject(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/TimingStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/TimingStats.java index a99260e668685..fbf08cbc6d760 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/TimingStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/TimingStats.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.core.ml.job.process.autodetect.state; +import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; @@ -16,9 +17,11 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.results.Result; +import org.elasticsearch.xpack.core.ml.utils.ExponentialAverageCalculationContext; import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import java.io.IOException; +import java.time.Instant; import java.util.Objects; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; @@ -36,6 +39,9 @@ public class TimingStats implements ToXContentObject, Writeable { public static final ParseField AVG_BUCKET_PROCESSING_TIME_MS = new ParseField("average_bucket_processing_time_ms"); public static final ParseField EXPONENTIAL_AVG_BUCKET_PROCESSING_TIME_MS = new ParseField("exponential_average_bucket_processing_time_ms"); + public static final ParseField EXPONENTIAL_AVG_CALCULATION_CONTEXT = new ParseField("exponential_average_calculation_context"); + public static final ParseField EXPONENTIAL_AVG_BUCKET_PROCESSING_TIME_PER_HOUR_MS = + new ParseField("exponential_average_bucket_processing_time_per_hour_ms"); public static final ParseField TYPE = new ParseField("timing_stats"); @@ -50,13 +56,15 @@ public class TimingStats implements ToXContentObject, Writeable { Double maxBucketProcessingTimeMs = (Double) args[3]; Double avgBucketProcessingTimeMs = (Double) args[4]; Double exponentialAvgBucketProcessingTimeMs = (Double) args[5]; + ExponentialAverageCalculationContext exponentialAvgCalculationContext = (ExponentialAverageCalculationContext) args[6]; return new TimingStats( jobId, bucketCount, minBucketProcessingTimeMs, maxBucketProcessingTimeMs, avgBucketProcessingTimeMs, - exponentialAvgBucketProcessingTimeMs); + exponentialAvgBucketProcessingTimeMs, + getOrDefault(exponentialAvgCalculationContext, new ExponentialAverageCalculationContext())); }); static { @@ -66,6 +74,7 @@ public class TimingStats implements ToXContentObject, Writeable { PARSER.declareDouble(optionalConstructorArg(), MAX_BUCKET_PROCESSING_TIME_MS); PARSER.declareDouble(optionalConstructorArg(), AVG_BUCKET_PROCESSING_TIME_MS); PARSER.declareDouble(optionalConstructorArg(), EXPONENTIAL_AVG_BUCKET_PROCESSING_TIME_MS); + PARSER.declareObject(optionalConstructorArg(), ExponentialAverageCalculationContext.PARSER, EXPONENTIAL_AVG_CALCULATION_CONTEXT); } public static String documentId(String jobId) { @@ -78,6 +87,7 @@ public static 
String documentId(String jobId) { private Double maxBucketProcessingTimeMs; private Double avgBucketProcessingTimeMs; private Double exponentialAvgBucketProcessingTimeMs; + private final ExponentialAverageCalculationContext exponentialAvgCalculationContext; public TimingStats( String jobId, @@ -85,17 +95,19 @@ public TimingStats( @Nullable Double minBucketProcessingTimeMs, @Nullable Double maxBucketProcessingTimeMs, @Nullable Double avgBucketProcessingTimeMs, - @Nullable Double exponentialAvgBucketProcessingTimeMs) { - this.jobId = jobId; + @Nullable Double exponentialAvgBucketProcessingTimeMs, + ExponentialAverageCalculationContext exponentialAvgCalculationContext) { + this.jobId = Objects.requireNonNull(jobId); this.bucketCount = bucketCount; this.minBucketProcessingTimeMs = minBucketProcessingTimeMs; this.maxBucketProcessingTimeMs = maxBucketProcessingTimeMs; this.avgBucketProcessingTimeMs = avgBucketProcessingTimeMs; this.exponentialAvgBucketProcessingTimeMs = exponentialAvgBucketProcessingTimeMs; + this.exponentialAvgCalculationContext = Objects.requireNonNull(exponentialAvgCalculationContext); } public TimingStats(String jobId) { - this(jobId, 0, null, null, null, null); + this(jobId, 0, null, null, null, null, new ExponentialAverageCalculationContext()); } public TimingStats(TimingStats lhs) { @@ -105,7 +117,8 @@ public TimingStats(TimingStats lhs) { lhs.minBucketProcessingTimeMs, lhs.maxBucketProcessingTimeMs, lhs.avgBucketProcessingTimeMs, - lhs.exponentialAvgBucketProcessingTimeMs); + lhs.exponentialAvgBucketProcessingTimeMs, + new ExponentialAverageCalculationContext(lhs.exponentialAvgCalculationContext)); } public TimingStats(StreamInput in) throws IOException { @@ -115,6 +128,11 @@ public TimingStats(StreamInput in) throws IOException { this.maxBucketProcessingTimeMs = in.readOptionalDouble(); this.avgBucketProcessingTimeMs = in.readOptionalDouble(); this.exponentialAvgBucketProcessingTimeMs = in.readOptionalDouble(); + if (in.getVersion().onOrAfter(Version.CURRENT)) { // TODO: Change to V_7_4_0 after backport + this.exponentialAvgCalculationContext = in.readOptionalWriteable(ExponentialAverageCalculationContext::new); + } else { + this.exponentialAvgCalculationContext = new ExponentialAverageCalculationContext(); + } } public String getJobId() { @@ -148,6 +166,15 @@ public Double getExponentialAvgBucketProcessingTimeMs() { return exponentialAvgBucketProcessingTimeMs; } + public Double getExponentialAvgBucketProcessingTimePerHourMs() { + return exponentialAvgCalculationContext.getCurrentExponentialAverageMs(); + } + + // Visible for testing + ExponentialAverageCalculationContext getExponentialAvgCalculationContext() { + return exponentialAvgCalculationContext; + } + /** * Updates the statistics (min, max, avg, exponential avg) for the given data point (bucket processing time). 
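+ * The exponential average follows the recurrence + * {@code newAvg = (1 - ALPHA) * oldAvg + ALPHA * newValue}, as implemented in the body below.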
*/ @@ -176,6 +203,11 @@ public void updateStats(double bucketProcessingTimeMs) { exponentialAvgBucketProcessingTimeMs = (1 - ALPHA) * exponentialAvgBucketProcessingTimeMs + ALPHA * bucketProcessingTimeMs; } bucketCount++; + exponentialAvgCalculationContext.increment(bucketProcessingTimeMs); + } + + public void setLatestRecordTimestamp(Instant latestRecordTimestamp) { + exponentialAvgCalculationContext.setLatestTimestamp(latestRecordTimestamp); } /** @@ -191,6 +223,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalDouble(maxBucketProcessingTimeMs); out.writeOptionalDouble(avgBucketProcessingTimeMs); out.writeOptionalDouble(exponentialAvgBucketProcessingTimeMs); + if (out.getVersion().onOrAfter(Version.CURRENT)) { // TODO: Change to V_7_4_0 after backport + out.writeOptionalWriteable(exponentialAvgCalculationContext); + } } @Override @@ -216,6 +251,15 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (exponentialAvgBucketProcessingTimeMs != null) { builder.field(EXPONENTIAL_AVG_BUCKET_PROCESSING_TIME_MS.getPreferredName(), exponentialAvgBucketProcessingTimeMs); } + if (params.paramAsBoolean(ToXContentParams.INCLUDE_CALCULATED_FIELDS, false)) { + Double expAvgBucketProcessingTimePerHourMs = getExponentialAvgBucketProcessingTimePerHourMs(); + if (expAvgBucketProcessingTimePerHourMs != null) { + builder.field(EXPONENTIAL_AVG_BUCKET_PROCESSING_TIME_PER_HOUR_MS.getPreferredName(), expAvgBucketProcessingTimePerHourMs); + } + } + if (params.paramAsBoolean(ToXContentParams.FOR_INTERNAL_STORAGE, false)) { + builder.field(EXPONENTIAL_AVG_CALCULATION_CONTEXT.getPreferredName(), exponentialAvgCalculationContext); + } builder.endObject(); return builder; } @@ -230,7 +274,8 @@ public boolean equals(Object o) { && Objects.equals(this.minBucketProcessingTimeMs, that.minBucketProcessingTimeMs) && Objects.equals(this.maxBucketProcessingTimeMs, that.maxBucketProcessingTimeMs) && Objects.equals(this.avgBucketProcessingTimeMs, that.avgBucketProcessingTimeMs) - && Objects.equals(this.exponentialAvgBucketProcessingTimeMs, that.exponentialAvgBucketProcessingTimeMs); + && Objects.equals(this.exponentialAvgBucketProcessingTimeMs, that.exponentialAvgBucketProcessingTimeMs) + && Objects.equals(this.exponentialAvgCalculationContext, that.exponentialAvgCalculationContext); } @Override @@ -241,11 +286,16 @@ public int hashCode() { minBucketProcessingTimeMs, maxBucketProcessingTimeMs, avgBucketProcessingTimeMs, - exponentialAvgBucketProcessingTimeMs); + exponentialAvgBucketProcessingTimeMs, + exponentialAvgCalculationContext); } @Override public String toString() { return Strings.toString(this); } + + private static T getOrDefault(@Nullable T value, T defaultValue) { + return value != null ? 
value : defaultValue; + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java index 51717c6bad2d0..76860e28481ce 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java @@ -29,6 +29,7 @@ import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshotField; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.TimingStats; +import org.elasticsearch.xpack.core.ml.utils.ExponentialAverageCalculationContext; import java.util.Arrays; import java.util.HashSet; @@ -185,10 +186,16 @@ public final class ReservedFieldNames { TimingStats.MAX_BUCKET_PROCESSING_TIME_MS.getPreferredName(), TimingStats.AVG_BUCKET_PROCESSING_TIME_MS.getPreferredName(), TimingStats.EXPONENTIAL_AVG_BUCKET_PROCESSING_TIME_MS.getPreferredName(), + TimingStats.EXPONENTIAL_AVG_CALCULATION_CONTEXT.getPreferredName(), DatafeedTimingStats.SEARCH_COUNT.getPreferredName(), DatafeedTimingStats.BUCKET_COUNT.getPreferredName(), DatafeedTimingStats.TOTAL_SEARCH_TIME_MS.getPreferredName(), + DatafeedTimingStats.EXPONENTIAL_AVG_CALCULATION_CONTEXT.getPreferredName(), + + ExponentialAverageCalculationContext.INCREMENTAL_METRIC_VALUE_MS.getPreferredName(), + ExponentialAverageCalculationContext.LATEST_TIMESTAMP.getPreferredName(), + ExponentialAverageCalculationContext.PREVIOUS_EXPONENTIAL_AVERAGE_MS.getPreferredName(), GetResult._ID, GetResult._INDEX, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ExponentialAverageCalculationContext.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ExponentialAverageCalculationContext.java new file mode 100644 index 0000000000000..25a93a2fe5b5e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ExponentialAverageCalculationContext.java @@ -0,0 +1,212 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.utils; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.common.time.TimeUtils; + +import java.io.IOException; +import java.time.Duration; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.time.temporal.TemporalUnit; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * Utility for calculating current value of exponentially-weighted moving average per fixed-sized time window. 
+ * + * The formula for the current value of the exponentially-weighted moving average is: + * + * currentExponentialAverageMs = alpha * previousExponentialAverageMs + (1 - alpha) * incrementalMetricValueMs + * + * where alpha depends on what fraction of the current time window we've already seen: + * + * alpha = e^(-time_elapsed_since_window_start/window_size) + * time_elapsed_since_window_start = latestTimestamp - window_start + * + * The class holds 3 values based on which it performs the calculation: + * - incrementalMetricValueMs - accumulated value of the metric in the current time window + * - latestTimestamp - timestamp updated as the time passes through the current time window + * - previousExponentialAverageMs - exponential average for previous time windows + * + * incrementalMetricValueMs should be updated using {@link #increment}. + * latestTimestamp should be updated using {@link #setLatestTimestamp}. + * Because it can happen that the timestamp is not available while incrementing the metric value, it is the responsibility of the user + * of this class to always call {@link #setLatestTimestamp} *after* all the relevant (i.e. referring to the points in time before the + * latest timestamp mentioned) {@link #increment} calls are made. + */ +public class ExponentialAverageCalculationContext implements Writeable, ToXContentObject { + + public static final ParseField INCREMENTAL_METRIC_VALUE_MS = new ParseField("incremental_metric_value_ms"); + public static final ParseField LATEST_TIMESTAMP = new ParseField("latest_timestamp"); + public static final ParseField PREVIOUS_EXPONENTIAL_AVERAGE_MS = new ParseField("previous_exponential_average_ms"); + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>( + "exponential_average_calculation_context", + true, + args -> { + Double incrementalMetricValueMs = (Double) args[0]; + Instant latestTimestamp = (Instant) args[1]; + Double previousExponentialAverageMs = (Double) args[2]; + return new ExponentialAverageCalculationContext( + getOrDefault(incrementalMetricValueMs, 0.0), + latestTimestamp, + previousExponentialAverageMs); + }); + + static { + PARSER.declareDouble(optionalConstructorArg(), INCREMENTAL_METRIC_VALUE_MS); + PARSER.declareField( + optionalConstructorArg(), + p -> TimeUtils.parseTimeFieldToInstant(p, LATEST_TIMESTAMP.getPreferredName()), + LATEST_TIMESTAMP, + ObjectParser.ValueType.VALUE); + PARSER.declareDouble(optionalConstructorArg(), PREVIOUS_EXPONENTIAL_AVERAGE_MS); + } + + private static final TemporalUnit WINDOW_UNIT = ChronoUnit.HOURS; + private static final Duration WINDOW_SIZE = WINDOW_UNIT.getDuration(); + + private double incrementalMetricValueMs; + private Instant latestTimestamp; + private Double previousExponentialAverageMs; + + public ExponentialAverageCalculationContext() { + this(0.0, null, null); + } + + public ExponentialAverageCalculationContext( + double incrementalMetricValueMs, + @Nullable Instant latestTimestamp, + @Nullable Double previousExponentialAverageMs) { + this.incrementalMetricValueMs = incrementalMetricValueMs; + this.latestTimestamp = latestTimestamp != null ? 
Instant.ofEpochMilli(latestTimestamp.toEpochMilli()) : null; + this.previousExponentialAverageMs = previousExponentialAverageMs; + } + + public ExponentialAverageCalculationContext(ExponentialAverageCalculationContext lhs) { + this(lhs.incrementalMetricValueMs, lhs.latestTimestamp, lhs.previousExponentialAverageMs); + } + + public ExponentialAverageCalculationContext(StreamInput in) throws IOException { + this.incrementalMetricValueMs = in.readDouble(); + this.latestTimestamp = in.readOptionalInstant(); + this.previousExponentialAverageMs = in.readOptionalDouble(); + } + + // Visible for testing + public double getIncrementalMetricValueMs() { + return incrementalMetricValueMs; + } + + // Visible for testing + public Instant getLatestTimestamp() { + return latestTimestamp; + } + + // Visible for testing + public Double getPreviousExponentialAverageMs() { + return previousExponentialAverageMs; + } + + public Double getCurrentExponentialAverageMs() { + if (previousExponentialAverageMs == null || latestTimestamp == null) return incrementalMetricValueMs; + Instant currentWindowStartTimestamp = latestTimestamp.truncatedTo(WINDOW_UNIT); + double alpha = Math.exp( + - (double) Duration.between(currentWindowStartTimestamp, latestTimestamp).toMillis() / WINDOW_SIZE.toMillis()); + return alpha * previousExponentialAverageMs + (1 - alpha) * incrementalMetricValueMs; + } + + /** + * Increments the current accumulated metric value by the given delta. + */ + public void increment(double metricValueDeltaMs) { + incrementalMetricValueMs += metricValueDeltaMs; + } + + /** + * Sets the latest timestamp that serves as an indication of the current point in time. + * Before calling this method make sure all the associated calls to {@link #increment} were already made. + */ + public void setLatestTimestamp(Instant newLatestTimestamp) { + Objects.requireNonNull(newLatestTimestamp); + if (this.latestTimestamp != null) { + Instant nextWindowStartTimestamp = this.latestTimestamp.truncatedTo(WINDOW_UNIT).plus(WINDOW_SIZE); + if (newLatestTimestamp.compareTo(nextWindowStartTimestamp) >= 0) { + // When we cross the boundary between windows, we update the exponential average with metric values accumulated so far in + // the incrementalMetricValueMs variable. + this.previousExponentialAverageMs = getCurrentExponentialAverageMs(); + this.incrementalMetricValueMs = 0.0; + } + } else { + // This is the first time {@link #setLatestTimestamp} is called on this object.
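+            // No timestamp has been recorded before, so there is no window boundary to cross and
+            // nothing needs to be folded into previousExponentialAverageMs yet.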
+ } + if (this.latestTimestamp == null || newLatestTimestamp.isAfter(this.latestTimestamp)) { + this.latestTimestamp = newLatestTimestamp; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeDouble(incrementalMetricValueMs); + out.writeOptionalInstant(latestTimestamp); + out.writeOptionalDouble(previousExponentialAverageMs); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(INCREMENTAL_METRIC_VALUE_MS.getPreferredName(), incrementalMetricValueMs); + if (latestTimestamp != null) { + builder.timeField( + LATEST_TIMESTAMP.getPreferredName(), + LATEST_TIMESTAMP.getPreferredName() + "_string", + latestTimestamp.toEpochMilli()); + } + if (previousExponentialAverageMs != null) { + builder.field(PREVIOUS_EXPONENTIAL_AVERAGE_MS.getPreferredName(), previousExponentialAverageMs); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (o == this) return true; + if (o == null || getClass() != o.getClass()) return false; + ExponentialAverageCalculationContext that = (ExponentialAverageCalculationContext) o; + return this.incrementalMetricValueMs == that.incrementalMetricValueMs + && Objects.equals(this.latestTimestamp, that.latestTimestamp) + && Objects.equals(this.previousExponentialAverageMs, that.previousExponentialAverageMs); + } + + @Override + public int hashCode() { + return Objects.hash(incrementalMetricValueMs, latestTimestamp, previousExponentialAverageMs); + } + + @Override + public String toString() { + return Strings.toString(this); + } + + @SuppressWarnings("unchecked") + private static <T> T getOrDefault(@Nullable T value, T defaultValue) { + return value != null ? 
value : defaultValue; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedStatsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedStatsActionResponseTests.java index 74bc71074260e..cc9bbabd12326 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedStatsActionResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedStatsActionResponseTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedTimingStats; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedTimingStatsTests; +import org.elasticsearch.xpack.core.ml.utils.ExponentialAverageCalculationContext; import java.io.IOException; import java.net.InetAddress; @@ -78,7 +79,8 @@ public void testDatafeedStatsToXContent() throws IOException { Set.of(), Version.CURRENT); - DatafeedTimingStats timingStats = new DatafeedTimingStats("my-job-id", 5, 10, 100.0); + DatafeedTimingStats timingStats = + new DatafeedTimingStats("my-job-id", 5, 10, 100.0, new ExponentialAverageCalculationContext(50.0, null, null)); Response.DatafeedStats stats = new Response.DatafeedStats("df-id", DatafeedState.STARTED, node, null, timingStats); @@ -110,11 +112,12 @@ public void testDatafeedStatsToXContent() throws IOException { assertThat(nodeAttributes, hasEntry("ml.max_open_jobs", "5")); Map timingStatsMap = (Map) dfStatsMap.get("timing_stats"); - assertThat(timingStatsMap.size(), is(equalTo(5))); + assertThat(timingStatsMap.size(), is(equalTo(6))); assertThat(timingStatsMap, hasEntry("job_id", "my-job-id")); assertThat(timingStatsMap, hasEntry("search_count", 5)); assertThat(timingStatsMap, hasEntry("bucket_count", 10)); assertThat(timingStatsMap, hasEntry("total_search_time_ms", 100.0)); assertThat(timingStatsMap, hasEntry("average_search_time_per_bucket_ms", 10.0)); + assertThat(timingStatsMap, hasEntry("exponential_average_search_time_per_hour_ms", 50.0)); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedTimingStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedTimingStatsTests.java index e8d7798ba6cc0..9ca522074e4d3 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedTimingStatsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedTimingStatsTests.java @@ -7,12 +7,18 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.ml.utils.ExponentialAverageCalculationContext; +import org.elasticsearch.xpack.core.ml.utils.ExponentialAverageCalculationContextTests; +import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import java.io.IOException; +import java.time.Instant; +import java.util.Collections; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; @@ -23,7 +29,12 @@ public class DatafeedTimingStatsTests extends AbstractSerializingTestCase { + + public static 
ExponentialAverageCalculationContext createRandom() { + return new ExponentialAverageCalculationContext( + randomDouble(), + randomBoolean() ? Instant.now() : null, + randomBoolean() ? randomDouble() : null); + } + + @Override + public ExponentialAverageCalculationContext createTestInstance() { + return createRandom(); + } + + @Override + protected Writeable.Reader<ExponentialAverageCalculationContext> instanceReader() { + return ExponentialAverageCalculationContext::new; + } + + @Override + protected ExponentialAverageCalculationContext doParseInstance(XContentParser parser) { + return ExponentialAverageCalculationContext.PARSER.apply(parser, null); + } + + public void testDefaultConstructor() { + ExponentialAverageCalculationContext context = new ExponentialAverageCalculationContext(); + + assertThat(context.getIncrementalMetricValueMs(), equalTo(0.0)); + assertThat(context.getLatestTimestamp(), nullValue()); + assertThat(context.getPreviousExponentialAverageMs(), nullValue()); + } + + public void testConstructor() { + ExponentialAverageCalculationContext context = + new ExponentialAverageCalculationContext(1.23, Instant.ofEpochMilli(123456789), 4.56); + + assertThat(context.getIncrementalMetricValueMs(), equalTo(1.23)); + assertThat(context.getLatestTimestamp(), equalTo(Instant.ofEpochMilli(123456789))); + assertThat(context.getPreviousExponentialAverageMs(), equalTo(4.56)); + } + + public void testCopyConstructor() { + ExponentialAverageCalculationContext context1 = + new ExponentialAverageCalculationContext(1.23, Instant.ofEpochMilli(123456789), 4.56); + ExponentialAverageCalculationContext context2 = new ExponentialAverageCalculationContext(context1); + + assertThat(context2.getIncrementalMetricValueMs(), equalTo(1.23)); + assertThat(context2.getLatestTimestamp(), equalTo(Instant.ofEpochMilli(123456789))); + assertThat(context2.getPreviousExponentialAverageMs(), equalTo(4.56)); + assertThat(context2.getCurrentExponentialAverageMs(), equalTo(context1.getCurrentExponentialAverageMs())); + } + + public void testExponentialAverageCalculation() { + ExponentialAverageCalculationContext context = new ExponentialAverageCalculationContext(0.0, null, null); + assertThat(context.getIncrementalMetricValueMs(), equalTo(0.0)); + assertThat(context.getLatestTimestamp(), nullValue()); + assertThat(context.getPreviousExponentialAverageMs(), nullValue()); + assertThat(context.getCurrentExponentialAverageMs(), equalTo(0.0)); + + context.increment(100.0); + context.increment(100.0); + context.increment(100.0); + assertThat(context.getIncrementalMetricValueMs(), equalTo(300.0)); + assertThat(context.getLatestTimestamp(), nullValue()); + assertThat(context.getPreviousExponentialAverageMs(), nullValue()); + assertThat(context.getCurrentExponentialAverageMs(), equalTo(300.0)); + + context.setLatestTimestamp(Instant.parse("2019-07-19T03:30:00.00Z")); + assertThat(context.getIncrementalMetricValueMs(), equalTo(300.0)); + assertThat(context.getLatestTimestamp(), equalTo(Instant.parse("2019-07-19T03:30:00.00Z"))); + assertThat(context.getPreviousExponentialAverageMs(), nullValue()); + assertThat(context.getCurrentExponentialAverageMs(), equalTo(300.0)); + + context.increment(200.0); + assertThat(context.getIncrementalMetricValueMs(), equalTo(500.0)); + assertThat(context.getLatestTimestamp(), equalTo(Instant.parse("2019-07-19T03:30:00.00Z"))); + assertThat(context.getPreviousExponentialAverageMs(), nullValue()); + assertThat(context.getCurrentExponentialAverageMs(), equalTo(500.0)); + 
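+        // The next call crosses from the 03:00-04:00 window into the 04:00-05:00 window: the 500.0 ms
+        // accumulated so far becomes previousExponentialAverageMs and the accumulator resets to 0.0.
+        // One second into the new window, alpha = e^(-1/3600) is close to 1, so the average stays near
+        // 500.0; half an hour in, alpha = e^(-0.5) = 0.6065, giving 0.6065 * 500.0 + 0.3935 * 1000.0,
+        // which is roughly 696.7 ms.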
context.setLatestTimestamp(Instant.parse("2019-07-19T04:00:01.00Z")); + assertThat(context.getIncrementalMetricValueMs(), equalTo(0.0)); + assertThat(context.getLatestTimestamp(), equalTo(Instant.parse("2019-07-19T04:00:01.00Z"))); + assertThat(context.getPreviousExponentialAverageMs(), equalTo(500.0)); + assertThat(context.getCurrentExponentialAverageMs(), closeTo(499.8, 0.1)); + + context.increment(1000.0); + context.setLatestTimestamp(Instant.parse("2019-07-19T04:30:00.00Z")); + assertThat(context.getIncrementalMetricValueMs(), equalTo(1000.0)); + assertThat(context.getLatestTimestamp(), equalTo(Instant.parse("2019-07-19T04:30:00.00Z"))); + assertThat(context.getPreviousExponentialAverageMs(), equalTo(500.0)); + assertThat(context.getCurrentExponentialAverageMs(), closeTo(696.7, 0.1)); + } + + public void testExponentialAverageCalculationOnWindowBoundary() { + ExponentialAverageCalculationContext context = + new ExponentialAverageCalculationContext(500.0, Instant.parse("2019-07-19T04:25:06.00Z"), 200.0); + assertThat(context.getIncrementalMetricValueMs(), equalTo(500.0)); + assertThat(context.getLatestTimestamp(), equalTo(Instant.parse("2019-07-19T04:25:06.00Z"))); + assertThat(context.getPreviousExponentialAverageMs(), equalTo(200.0)); + assertThat(context.getCurrentExponentialAverageMs(), closeTo(302.5, 0.1)); + + context.setLatestTimestamp(Instant.parse("2019-07-19T05:00:00.00Z")); + assertThat(context.getIncrementalMetricValueMs(), equalTo(0.0)); + assertThat(context.getLatestTimestamp(), equalTo(Instant.parse("2019-07-19T05:00:00.00Z"))); + assertThat(context.getPreviousExponentialAverageMs(), closeTo(302.5, 0.1)); + assertThat(context.getCurrentExponentialAverageMs(), equalTo(context.getPreviousExponentialAverageMs())); + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporter.java index fbb32395f14ef..7df3919c459b3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporter.java @@ -46,7 +46,7 @@ public void reportSearchDuration(TimeValue searchDuration) { if (searchDuration == null) { return; } - currentTimingStats.incrementTotalSearchTimeMs(searchDuration.millis()); + currentTimingStats.incrementSearchTimeMs(searchDuration.millis()); flushIfDifferSignificantly(); } @@ -58,6 +58,9 @@ public void reportDataCounts(DataCounts dataCounts) { return; } currentTimingStats.incrementBucketCount(dataCounts.getBucketCount()); + if (dataCounts.getLatestRecordTimeStamp() != null) { + currentTimingStats.setLatestRecordTimestamp(dataCounts.getLatestRecordTimeStamp().toInstant()); + } flushIfDifferSignificantly(); } @@ -79,7 +82,8 @@ private void flush() { public static boolean differSignificantly(DatafeedTimingStats stats1, DatafeedTimingStats stats2) { return countsDifferSignificantly(stats1.getSearchCount(), stats2.getSearchCount()) || differSignificantly(stats1.getTotalSearchTimeMs(), stats2.getTotalSearchTimeMs()) - || differSignificantly(stats1.getAvgSearchTimePerBucketMs(), stats2.getAvgSearchTimePerBucketMs()); + || differSignificantly(stats1.getAvgSearchTimePerBucketMs(), stats2.getAvgSearchTimePerBucketMs()) + || differSignificantly(stats1.getExponentialAvgSearchTimePerHourMs(), stats2.getExponentialAvgSearchTimePerHourMs()); } /** diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/TimingStatsReporter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/TimingStatsReporter.java index 0da4046edb49e..69d6936e9051f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/TimingStatsReporter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/TimingStatsReporter.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.ml.job.persistence; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.TimingStats; +import org.elasticsearch.xpack.core.ml.job.results.Bucket; import java.util.Objects; @@ -35,8 +36,9 @@ public TimingStats getCurrentTimingStats() { return new TimingStats(currentTimingStats); } - public void reportBucketProcessingTime(long bucketProcessingTimeMs) { - currentTimingStats.updateStats(bucketProcessingTimeMs); + public void reportBucket(Bucket bucket) { + currentTimingStats.updateStats(bucket.getProcessingTimeMs()); + currentTimingStats.setLatestRecordTimestamp(bucket.getTimestamp().toInstant().plusSeconds(bucket.getBucketSpan())); if (differSignificantly(currentTimingStats, persistedTimingStats)) { flush(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessor.java index d2d052b1a3e6c..f596fbc669af6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessor.java @@ -213,7 +213,7 @@ void processResult(AutodetectResult result) { // persist after deleting interim results in case the new // results are also interim - timingStatsReporter.reportBucketProcessingTime(bucket.getProcessingTimeMs()); + timingStatsReporter.reportBucket(bucket); bulkResultsPersister.persistBucket(bucket).executeRequest(); ++bucketCount; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporterTests.java index 9c48dd780a321..6daa0f5a0b842 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporterTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedTimingStats; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; +import org.elasticsearch.xpack.core.ml.utils.ExponentialAverageCalculationContext; import org.elasticsearch.xpack.ml.job.persistence.JobResultsPersister; import org.junit.Before; import org.mockito.InOrder; @@ -35,81 +36,76 @@ public void setUpTests() { } public void testReportSearchDuration_Null() { - DatafeedTimingStatsReporter timingStatsReporter = - new DatafeedTimingStatsReporter(new DatafeedTimingStats(JOB_ID, 3, 10, 10000.0), jobResultsPersister); - assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 3, 10, 10000.0))); + DatafeedTimingStatsReporter timingStatsReporter = createReporter(createDatafeedTimingStats(JOB_ID, 3, 10, 10000.0)); + 
assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(createDatafeedTimingStats(JOB_ID, 3, 10, 10000.0))); timingStatsReporter.reportSearchDuration(null); - assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 3, 10, 10000.0))); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(createDatafeedTimingStats(JOB_ID, 3, 10, 10000.0))); verifyZeroInteractions(jobResultsPersister); } public void testReportSearchDuration_Zero() { - DatafeedTimingStatsReporter timingStatsReporter = - new DatafeedTimingStatsReporter(new DatafeedTimingStats(JOB_ID), jobResultsPersister); - assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 0, 0, 0.0))); + DatafeedTimingStatsReporter timingStatsReporter = createReporter(createDatafeedTimingStats(JOB_ID, 0, 0, 0.0)); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(createDatafeedTimingStats(JOB_ID, 0, 0, 0.0))); timingStatsReporter.reportSearchDuration(TimeValue.ZERO); - assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 1, 0, 0.0))); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(createDatafeedTimingStats(JOB_ID, 1, 0, 0.0))); - verify(jobResultsPersister).persistDatafeedTimingStats(new DatafeedTimingStats(JOB_ID, 1, 0, 0.0), RefreshPolicy.IMMEDIATE); + verify(jobResultsPersister).persistDatafeedTimingStats(createDatafeedTimingStats(JOB_ID, 1, 0, 0.0), RefreshPolicy.IMMEDIATE); verifyNoMoreInteractions(jobResultsPersister); } public void testReportSearchDuration() { - DatafeedTimingStatsReporter timingStatsReporter = - new DatafeedTimingStatsReporter(new DatafeedTimingStats(JOB_ID, 13, 10, 10000.0), jobResultsPersister); - assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 13, 10, 10000.0))); + DatafeedTimingStatsReporter timingStatsReporter = createReporter(createDatafeedTimingStats(JOB_ID, 13, 10, 10000.0, 10000.0)); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(createDatafeedTimingStats(JOB_ID, 13, 10, 10000.0, 10000.0))); timingStatsReporter.reportSearchDuration(ONE_SECOND); - assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 14, 10, 11000.0))); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(createDatafeedTimingStats(JOB_ID, 14, 10, 11000.0, 11000.0))); timingStatsReporter.reportSearchDuration(ONE_SECOND); - assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 15, 10, 12000.0))); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(createDatafeedTimingStats(JOB_ID, 15, 10, 12000.0, 12000.0))); timingStatsReporter.reportSearchDuration(ONE_SECOND); - assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 16, 10, 13000.0))); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(createDatafeedTimingStats(JOB_ID, 16, 10, 13000.0, 13000.0))); timingStatsReporter.reportSearchDuration(ONE_SECOND); - assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 17, 10, 14000.0))); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(createDatafeedTimingStats(JOB_ID, 17, 10, 14000.0, 14000.0))); InOrder inOrder = inOrder(jobResultsPersister); inOrder.verify(jobResultsPersister).persistDatafeedTimingStats( - new DatafeedTimingStats(JOB_ID, 15, 10, 12000.0), RefreshPolicy.IMMEDIATE); + 
createDatafeedTimingStats(JOB_ID, 15, 10, 12000.0, 12000.0), RefreshPolicy.IMMEDIATE); inOrder.verify(jobResultsPersister).persistDatafeedTimingStats( - new DatafeedTimingStats(JOB_ID, 17, 10, 14000.0), RefreshPolicy.IMMEDIATE); + createDatafeedTimingStats(JOB_ID, 17, 10, 14000.0, 14000.0), RefreshPolicy.IMMEDIATE); verifyNoMoreInteractions(jobResultsPersister); } public void testReportDataCounts_Null() { - DatafeedTimingStatsReporter timingStatsReporter = - new DatafeedTimingStatsReporter(new DatafeedTimingStats(JOB_ID, 3, 10, 10000.0), jobResultsPersister); - assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 3, 10, 10000.0))); + DatafeedTimingStatsReporter timingStatsReporter = createReporter(createDatafeedTimingStats(JOB_ID, 3, 10, 10000.0)); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(createDatafeedTimingStats(JOB_ID, 3, 10, 10000.0))); timingStatsReporter.reportDataCounts(null); - assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 3, 10, 10000.0))); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(createDatafeedTimingStats(JOB_ID, 3, 10, 10000.0))); verifyZeroInteractions(jobResultsPersister); } public void testReportDataCounts() { - DatafeedTimingStatsReporter timingStatsReporter = - new DatafeedTimingStatsReporter(new DatafeedTimingStats(JOB_ID, 3, 20, 10000.0), jobResultsPersister); - assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 3, 20, 10000.0))); + DatafeedTimingStatsReporter timingStatsReporter = createReporter(createDatafeedTimingStats(JOB_ID, 3, 20, 10000.0)); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(createDatafeedTimingStats(JOB_ID, 3, 20, 10000.0))); timingStatsReporter.reportDataCounts(createDataCountsWithBucketCount(1)); - assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 3, 21, 10000.0))); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(createDatafeedTimingStats(JOB_ID, 3, 21, 10000.0))); timingStatsReporter.reportDataCounts(createDataCountsWithBucketCount(1)); - assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 3, 22, 10000.0))); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(createDatafeedTimingStats(JOB_ID, 3, 22, 10000.0))); timingStatsReporter.reportDataCounts(createDataCountsWithBucketCount(1)); - assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(new DatafeedTimingStats(JOB_ID, 3, 23, 10000.0))); + assertThat(timingStatsReporter.getCurrentTimingStats(), equalTo(createDatafeedTimingStats(JOB_ID, 3, 23, 10000.0))); InOrder inOrder = inOrder(jobResultsPersister); inOrder.verify(jobResultsPersister).persistDatafeedTimingStats( - new DatafeedTimingStats(JOB_ID, 3, 23, 10000.0), RefreshPolicy.IMMEDIATE); + createDatafeedTimingStats(JOB_ID, 3, 23, 10000.0), RefreshPolicy.IMMEDIATE); verifyNoMoreInteractions(jobResultsPersister); } @@ -122,35 +118,57 @@ private static DataCounts createDataCountsWithBucketCount(long bucketCount) { public void testTimingStatsDifferSignificantly() { assertThat( DatafeedTimingStatsReporter.differSignificantly( - new DatafeedTimingStats(JOB_ID, 5, 10, 1000.0), new DatafeedTimingStats(JOB_ID, 5, 10, 1000.0)), + createDatafeedTimingStats(JOB_ID, 5, 10, 1000.0), createDatafeedTimingStats(JOB_ID, 5, 10, 1000.0)), is(false)); assertThat( DatafeedTimingStatsReporter.differSignificantly( - new 
DatafeedTimingStats(JOB_ID, 5, 10, 1000.0), new DatafeedTimingStats(JOB_ID, 5, 10, 1100.0)), + createDatafeedTimingStats(JOB_ID, 5, 10, 1000.0), createDatafeedTimingStats(JOB_ID, 5, 10, 1100.0)), is(false)); assertThat( DatafeedTimingStatsReporter.differSignificantly( - new DatafeedTimingStats(JOB_ID, 5, 10, 1000.0), new DatafeedTimingStats(JOB_ID, 5, 10, 1120.0)), + createDatafeedTimingStats(JOB_ID, 5, 10, 1000.0), createDatafeedTimingStats(JOB_ID, 5, 10, 1120.0)), is(true)); assertThat( DatafeedTimingStatsReporter.differSignificantly( - new DatafeedTimingStats(JOB_ID, 5, 10, 10000.0), new DatafeedTimingStats(JOB_ID, 5, 10, 11000.0)), + createDatafeedTimingStats(JOB_ID, 5, 10, 10000.0), createDatafeedTimingStats(JOB_ID, 5, 10, 11000.0)), is(false)); assertThat( DatafeedTimingStatsReporter.differSignificantly( - new DatafeedTimingStats(JOB_ID, 5, 10, 10000.0), new DatafeedTimingStats(JOB_ID, 5, 10, 11200.0)), + createDatafeedTimingStats(JOB_ID, 5, 10, 10000.0), createDatafeedTimingStats(JOB_ID, 5, 10, 11200.0)), is(true)); assertThat( DatafeedTimingStatsReporter.differSignificantly( - new DatafeedTimingStats(JOB_ID, 5, 10, 100000.0), new DatafeedTimingStats(JOB_ID, 5, 10, 110000.0)), + createDatafeedTimingStats(JOB_ID, 5, 10, 100000.0), createDatafeedTimingStats(JOB_ID, 5, 10, 110000.0)), is(false)); assertThat( DatafeedTimingStatsReporter.differSignificantly( - new DatafeedTimingStats(JOB_ID, 5, 10, 100000.0), new DatafeedTimingStats(JOB_ID, 5, 10, 110001.0)), + createDatafeedTimingStats(JOB_ID, 5, 10, 100000.0), createDatafeedTimingStats(JOB_ID, 5, 10, 110001.0)), is(true)); assertThat( DatafeedTimingStatsReporter.differSignificantly( - new DatafeedTimingStats(JOB_ID, 5, 10, 100000.0), new DatafeedTimingStats(JOB_ID, 50, 10, 100000.0)), + createDatafeedTimingStats(JOB_ID, 5, 10, 100000.0), createDatafeedTimingStats(JOB_ID, 50, 10, 100000.0)), is(true)); } + + private DatafeedTimingStatsReporter createReporter(DatafeedTimingStats timingStats) { + return new DatafeedTimingStatsReporter(timingStats, jobResultsPersister); + } + + private static DatafeedTimingStats createDatafeedTimingStats( + String jobId, + long searchCount, + long bucketCount, + double totalSearchTimeMs) { + return createDatafeedTimingStats(jobId, searchCount, bucketCount, totalSearchTimeMs, 0.0); + } + + private static DatafeedTimingStats createDatafeedTimingStats( + String jobId, + long searchCount, + long bucketCount, + double totalSearchTimeMs, + double incrementalSearchTimeMs) { + ExponentialAverageCalculationContext context = new ExponentialAverageCalculationContext(incrementalSearchTimeMs, null, null); + return new DatafeedTimingStats(jobId, searchCount, bucketCount, totalSearchTimeMs, context); + } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java index 94017ef266f2c..4aff83ab39065 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java @@ -25,9 +25,11 @@ import org.elasticsearch.xpack.core.ml.job.results.BucketInfluencer; import org.elasticsearch.xpack.core.ml.job.results.Influencer; import org.elasticsearch.xpack.core.ml.job.results.ModelPlot; +import org.elasticsearch.xpack.core.ml.utils.ExponentialAverageCalculationContext; import org.mockito.ArgumentCaptor; import 
java.io.IOException; +import java.time.Instant; import java.util.ArrayList; import java.util.Collections; import java.util.Date; @@ -208,7 +210,9 @@ public void testPersistTimingStats() { Client client = mockClient(bulkRequestCaptor); JobResultsPersister persister = new JobResultsPersister(client); - TimingStats timingStats = new TimingStats("foo", 7, 1.0, 2.0, 1.23, 7.89); + TimingStats timingStats = + new TimingStats( + "foo", 7, 1.0, 2.0, 1.23, 7.89, new ExponentialAverageCalculationContext(600.0, Instant.ofEpochMilli(123456789), 60.0)); persister.bulkPersisterBuilder(JOB_ID).persistTimingStats(timingStats).executeRequest(); verify(client, times(1)).bulk(bulkRequestCaptor.capture()); @@ -227,7 +231,11 @@ public void testPersistTimingStats() { "minimum_bucket_processing_time_ms", 1.0, "maximum_bucket_processing_time_ms", 2.0, "average_bucket_processing_time_ms", 1.23, - "exponential_average_bucket_processing_time_ms", 7.89))); + "exponential_average_bucket_processing_time_ms", 7.89, + "exponential_average_calculation_context", Map.of( + "incremental_metric_value_ms", 600.0, + "previous_exponential_average_ms", 60.0, + "latest_timestamp", 123456789)))); verify(client, times(1)).threadPool(); verifyNoMoreInteractions(client); @@ -247,7 +255,9 @@ public void testPersistDatafeedTimingStats() { .when(client).index(any(), any(ActionListener.class)); JobResultsPersister persister = new JobResultsPersister(client); - DatafeedTimingStats timingStats = new DatafeedTimingStats("foo", 6, 66, 666.0); + DatafeedTimingStats timingStats = + new DatafeedTimingStats( + "foo", 6, 66, 666.0, new ExponentialAverageCalculationContext(600.0, Instant.ofEpochMilli(123456789), 60.0)); persister.persistDatafeedTimingStats(timingStats, WriteRequest.RefreshPolicy.IMMEDIATE); ArgumentCaptor indexRequestCaptor = ArgumentCaptor.forClass(IndexRequest.class); @@ -264,7 +274,11 @@ public void testPersistDatafeedTimingStats() { "job_id", "foo", "search_count", 6, "bucket_count", 66, - "total_search_time_ms", 666.0))); + "total_search_time_ms", 666.0, + "exponential_average_calculation_context", Map.of( + "incremental_metric_value_ms", 600.0, + "previous_exponential_average_ms", 60.0, + "latest_timestamp", 123456789)))); verify(client, times(1)).threadPool(); verifyNoMoreInteractions(client); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java index cf005d3c3da59..d845bb5751fd4 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java @@ -55,10 +55,12 @@ import org.elasticsearch.xpack.core.ml.job.results.CategoryDefinition; import org.elasticsearch.xpack.core.ml.job.results.Influencer; import org.elasticsearch.xpack.core.ml.job.results.Result; +import org.elasticsearch.xpack.core.ml.utils.ExponentialAverageCalculationContext; import org.elasticsearch.xpack.ml.job.persistence.InfluencersQueryBuilder.InfluencersQuery; import org.mockito.ArgumentCaptor; import java.io.IOException; +import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -845,7 +847,11 @@ public void testTimingStats_Ok() throws IOException { TimingStats.MIN_BUCKET_PROCESSING_TIME_MS.getPreferredName(), 1.0, TimingStats.MAX_BUCKET_PROCESSING_TIME_MS.getPreferredName(), 1000.0, 
TimingStats.AVG_BUCKET_PROCESSING_TIME_MS.getPreferredName(), 666.0, - TimingStats.EXPONENTIAL_AVG_BUCKET_PROCESSING_TIME_MS.getPreferredName(), 777.0)); + TimingStats.EXPONENTIAL_AVG_BUCKET_PROCESSING_TIME_MS.getPreferredName(), 777.0, + TimingStats.EXPONENTIAL_AVG_CALCULATION_CONTEXT.getPreferredName(), Map.of( + ExponentialAverageCalculationContext.INCREMENTAL_METRIC_VALUE_MS.getPreferredName(), 100.0, + ExponentialAverageCalculationContext.LATEST_TIMESTAMP.getPreferredName(), Instant.ofEpochMilli(1000_000_000), + ExponentialAverageCalculationContext.PREVIOUS_EXPONENTIAL_AVERAGE_MS.getPreferredName(), 200.0))); SearchResponse response = createSearchResponse(source); Client client = getMockedClient( queryBuilder -> assertThat(queryBuilder.getName(), equalTo("ids")), @@ -853,9 +859,11 @@ public void testTimingStats_Ok() throws IOException { when(client.prepareSearch(indexName)).thenReturn(new SearchRequestBuilder(client, SearchAction.INSTANCE).setIndices(indexName)); JobResultsProvider provider = createProvider(client); + ExponentialAverageCalculationContext context = + new ExponentialAverageCalculationContext(100.0, Instant.ofEpochMilli(1000_000_000), 200.0); provider.timingStats( "foo", - stats -> assertThat(stats, equalTo(new TimingStats("foo", 7, 1.0, 1000.0, 666.0, 777.0))), + stats -> assertThat(stats, equalTo(new TimingStats("foo", 7, 1.0, 1000.0, 666.0, 777.0, context))), e -> { throw new AssertionError(); }); verify(client).prepareSearch(indexName); @@ -904,14 +912,22 @@ public void testDatafeedTimingStats_MultipleDocumentsAtOnce() throws IOException Job.ID.getPreferredName(), "foo", DatafeedTimingStats.SEARCH_COUNT.getPreferredName(), 6, DatafeedTimingStats.BUCKET_COUNT.getPreferredName(), 66, - DatafeedTimingStats.TOTAL_SEARCH_TIME_MS.getPreferredName(), 666.0)); + DatafeedTimingStats.TOTAL_SEARCH_TIME_MS.getPreferredName(), 666.0, + DatafeedTimingStats.EXPONENTIAL_AVG_CALCULATION_CONTEXT.getPreferredName(), Map.of( + ExponentialAverageCalculationContext.INCREMENTAL_METRIC_VALUE_MS.getPreferredName(), 600.0, + ExponentialAverageCalculationContext.LATEST_TIMESTAMP.getPreferredName(), Instant.ofEpochMilli(100000600), + ExponentialAverageCalculationContext.PREVIOUS_EXPONENTIAL_AVERAGE_MS.getPreferredName(), 60.0))); List> sourceBar = Arrays.asList( Map.of( Job.ID.getPreferredName(), "bar", DatafeedTimingStats.SEARCH_COUNT.getPreferredName(), 7, DatafeedTimingStats.BUCKET_COUNT.getPreferredName(), 77, - DatafeedTimingStats.TOTAL_SEARCH_TIME_MS.getPreferredName(), 777.0)); + DatafeedTimingStats.TOTAL_SEARCH_TIME_MS.getPreferredName(), 777.0, + DatafeedTimingStats.EXPONENTIAL_AVG_CALCULATION_CONTEXT.getPreferredName(), Map.of( + ExponentialAverageCalculationContext.INCREMENTAL_METRIC_VALUE_MS.getPreferredName(), 700.0, + ExponentialAverageCalculationContext.LATEST_TIMESTAMP.getPreferredName(), Instant.ofEpochMilli(100000700), + ExponentialAverageCalculationContext.PREVIOUS_EXPONENTIAL_AVERAGE_MS.getPreferredName(), 70.0))); SearchResponse responseFoo = createSearchResponse(sourceFoo); SearchResponse responseBar = createSearchResponse(sourceBar); MultiSearchResponse multiSearchResponse = new MultiSearchResponse( @@ -940,6 +956,10 @@ public void testDatafeedTimingStats_MultipleDocumentsAtOnce() throws IOException new SearchRequestBuilder(client, SearchAction.INSTANCE).setIndices(AnomalyDetectorsIndex.jobResultsAliasedName("bar"))); JobResultsProvider provider = createProvider(client); + ExponentialAverageCalculationContext contextFoo = + new ExponentialAverageCalculationContext(600.0, 
Instant.ofEpochMilli(100000600), 60.0); + ExponentialAverageCalculationContext contextBar = + new ExponentialAverageCalculationContext(700.0, Instant.ofEpochMilli(100000700), 70.0); provider.datafeedTimingStats( List.of("foo", "bar"), statsByJobId -> @@ -947,8 +967,8 @@ public void testDatafeedTimingStats_MultipleDocumentsAtOnce() throws IOException statsByJobId, equalTo( Map.of( - "foo", new DatafeedTimingStats("foo", 6, 66, 666.0), - "bar", new DatafeedTimingStats("bar", 7, 77, 777.0)))), + "foo", new DatafeedTimingStats("foo", 6, 66, 666.0, contextFoo), + "bar", new DatafeedTimingStats("bar", 7, 77, 777.0, contextBar)))), e -> { throw new AssertionError(); }); verify(client).threadPool(); @@ -967,7 +987,11 @@ public void testDatafeedTimingStats_Ok() throws IOException { Job.ID.getPreferredName(), "foo", DatafeedTimingStats.SEARCH_COUNT.getPreferredName(), 6, DatafeedTimingStats.BUCKET_COUNT.getPreferredName(), 66, - DatafeedTimingStats.TOTAL_SEARCH_TIME_MS.getPreferredName(), 666.0)); + DatafeedTimingStats.TOTAL_SEARCH_TIME_MS.getPreferredName(), 666.0, + DatafeedTimingStats.EXPONENTIAL_AVG_CALCULATION_CONTEXT.getPreferredName(), Map.of( + ExponentialAverageCalculationContext.INCREMENTAL_METRIC_VALUE_MS.getPreferredName(), 600.0, + ExponentialAverageCalculationContext.LATEST_TIMESTAMP.getPreferredName(), Instant.ofEpochMilli(100000600), + ExponentialAverageCalculationContext.PREVIOUS_EXPONENTIAL_AVERAGE_MS.getPreferredName(), 60.0))); SearchResponse response = createSearchResponse(source); Client client = getMockedClient( queryBuilder -> assertThat(queryBuilder.getName(), equalTo("ids")), @@ -975,9 +999,11 @@ public void testDatafeedTimingStats_Ok() throws IOException { when(client.prepareSearch(indexName)).thenReturn(new SearchRequestBuilder(client, SearchAction.INSTANCE).setIndices(indexName)); JobResultsProvider provider = createProvider(client); + ExponentialAverageCalculationContext contextFoo = + new ExponentialAverageCalculationContext(600.0, Instant.ofEpochMilli(100000600), 60.0); provider.datafeedTimingStats( "foo", - stats -> assertThat(stats, equalTo(new DatafeedTimingStats("foo", 6, 66, 666.0))), + stats -> assertThat(stats, equalTo(new DatafeedTimingStats("foo", 6, 66, 666.0, contextFoo))), e -> { throw new AssertionError(); }); verify(client).prepareSearch(indexName); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/TimingStatsReporterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/TimingStatsReporterTests.java index 4e5a97f860d9d..9e1e5646e115d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/TimingStatsReporterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/TimingStatsReporterTests.java @@ -5,11 +5,18 @@ */ package org.elasticsearch.xpack.ml.job.persistence; +import org.elasticsearch.common.Nullable; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.TimingStats; +import org.elasticsearch.xpack.core.ml.job.results.Bucket; +import org.elasticsearch.xpack.core.ml.utils.ExponentialAverageCalculationContext; import org.junit.Before; import org.mockito.InOrder; +import java.time.Duration; +import java.time.Instant; +import java.util.Date; + import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.inOrder; @@ -20,6 +27,8 @@ public class TimingStatsReporterTests extends ESTestCase { private static final 
String JOB_ID = "my-job-id"; + private static final Instant TIMESTAMP = Instant.ofEpochMilli(1000000000); + private static final Duration BUCKET_SPAN = Duration.ofMinutes(1); private JobResultsPersister.Builder bulkResultsPersister; @@ -29,56 +38,56 @@ public void setUpTests() { } public void testGetCurrentTimingStats() { - TimingStats stats = new TimingStats(JOB_ID, 7, 1.0, 2.0, 1.23, 7.89); - TimingStatsReporter reporter = new TimingStatsReporter(stats, bulkResultsPersister); + TimingStats stats = createTimingStats(JOB_ID, 7, 1.0, 2.0, 1.23, 7.89); + TimingStatsReporter reporter = createReporter(stats); assertThat(reporter.getCurrentTimingStats(), equalTo(stats)); verifyZeroInteractions(bulkResultsPersister); } public void testReporting() { - TimingStatsReporter reporter = new TimingStatsReporter(new TimingStats(JOB_ID), bulkResultsPersister); + TimingStatsReporter reporter = createReporter(new TimingStats(JOB_ID)); assertThat(reporter.getCurrentTimingStats(), equalTo(new TimingStats(JOB_ID))); - reporter.reportBucketProcessingTime(10); - assertThat(reporter.getCurrentTimingStats(), equalTo(new TimingStats(JOB_ID, 1, 10.0, 10.0, 10.0, 10.0))); + reporter.reportBucket(createBucket(10)); + assertThat(reporter.getCurrentTimingStats(), equalTo(createTimingStats(JOB_ID, 1, 10.0, 10.0, 10.0, 10.0, 10.0))); - reporter.reportBucketProcessingTime(20); - assertThat(reporter.getCurrentTimingStats(), equalTo(new TimingStats(JOB_ID, 2, 10.0, 20.0, 15.0, 10.1))); + reporter.reportBucket(createBucket(20)); + assertThat(reporter.getCurrentTimingStats(), equalTo(createTimingStats(JOB_ID, 2, 10.0, 20.0, 15.0, 10.1, 30.0))); - reporter.reportBucketProcessingTime(15); - assertThat(reporter.getCurrentTimingStats(), equalTo(new TimingStats(JOB_ID, 3, 10.0, 20.0, 15.0, 10.149))); + reporter.reportBucket(createBucket(15)); + assertThat(reporter.getCurrentTimingStats(), equalTo(createTimingStats(JOB_ID, 3, 10.0, 20.0, 15.0, 10.149, 45.0))); InOrder inOrder = inOrder(bulkResultsPersister); - inOrder.verify(bulkResultsPersister).persistTimingStats(new TimingStats(JOB_ID, 1, 10.0, 10.0, 10.0, 10.0)); - inOrder.verify(bulkResultsPersister).persistTimingStats(new TimingStats(JOB_ID, 2, 10.0, 20.0, 15.0, 10.1)); + inOrder.verify(bulkResultsPersister).persistTimingStats(createTimingStats(JOB_ID, 1, 10.0, 10.0, 10.0, 10.0, 10.0)); + inOrder.verify(bulkResultsPersister).persistTimingStats(createTimingStats(JOB_ID, 2, 10.0, 20.0, 15.0, 10.1, 30.0)); inOrder.verifyNoMoreInteractions(); } public void testFinishReporting() { - TimingStatsReporter reporter = new TimingStatsReporter(new TimingStats(JOB_ID), bulkResultsPersister); + TimingStatsReporter reporter = createReporter(new TimingStats(JOB_ID)); assertThat(reporter.getCurrentTimingStats(), equalTo(new TimingStats(JOB_ID))); - reporter.reportBucketProcessingTime(10); - assertThat(reporter.getCurrentTimingStats(), equalTo(new TimingStats(JOB_ID, 1, 10.0, 10.0, 10.0, 10.0))); + reporter.reportBucket(createBucket(10)); + assertThat(reporter.getCurrentTimingStats(), equalTo(createTimingStats(JOB_ID, 1, 10.0, 10.0, 10.0, 10.0, 10.0))); - reporter.reportBucketProcessingTime(10); - assertThat(reporter.getCurrentTimingStats(), equalTo(new TimingStats(JOB_ID, 2, 10.0, 10.0, 10.0, 10.0))); + reporter.reportBucket(createBucket(10)); + assertThat(reporter.getCurrentTimingStats(), equalTo(createTimingStats(JOB_ID, 2, 10.0, 10.0, 10.0, 10.0, 20.0))); - reporter.reportBucketProcessingTime(10); - assertThat(reporter.getCurrentTimingStats(), equalTo(new TimingStats(JOB_ID, 3, 10.0, 10.0, 
10.0, 10.0))); + reporter.reportBucket(createBucket(10)); + assertThat(reporter.getCurrentTimingStats(), equalTo(createTimingStats(JOB_ID, 3, 10.0, 10.0, 10.0, 10.0, 30.0))); reporter.finishReporting(); - assertThat(reporter.getCurrentTimingStats(), equalTo(new TimingStats(JOB_ID, 3, 10.0, 10.0, 10.0, 10.0))); + assertThat(reporter.getCurrentTimingStats(), equalTo(createTimingStats(JOB_ID, 3, 10.0, 10.0, 10.0, 10.0, 30.0))); InOrder inOrder = inOrder(bulkResultsPersister); - inOrder.verify(bulkResultsPersister).persistTimingStats(new TimingStats(JOB_ID, 1, 10.0, 10.0, 10.0, 10.0)); - inOrder.verify(bulkResultsPersister).persistTimingStats(new TimingStats(JOB_ID, 3, 10.0, 10.0, 10.0, 10.0)); + inOrder.verify(bulkResultsPersister).persistTimingStats(createTimingStats(JOB_ID, 1, 10.0, 10.0, 10.0, 10.0, 10.0)); + inOrder.verify(bulkResultsPersister).persistTimingStats(createTimingStats(JOB_ID, 3, 10.0, 10.0, 10.0, 10.0, 30.0)); inOrder.verifyNoMoreInteractions(); } public void testFinishReportingNoChange() { - TimingStatsReporter reporter = new TimingStatsReporter(new TimingStats(JOB_ID), bulkResultsPersister); + TimingStatsReporter reporter = createReporter(new TimingStats(JOB_ID)); reporter.finishReporting(); @@ -86,27 +95,27 @@ public void testFinishReportingNoChange() { } public void testFinishReportingWithChange() { - TimingStatsReporter reporter = new TimingStatsReporter(new TimingStats(JOB_ID), bulkResultsPersister); + TimingStatsReporter reporter = createReporter(new TimingStats(JOB_ID)); - reporter.reportBucketProcessingTime(10); + reporter.reportBucket(createBucket(10)); reporter.finishReporting(); - verify(bulkResultsPersister).persistTimingStats(new TimingStats(JOB_ID, 1, 10.0, 10.0, 10.0, 10.0)); + verify(bulkResultsPersister).persistTimingStats(createTimingStats(JOB_ID, 1, 10.0, 10.0, 10.0, 10.0, 10.0)); } public void testTimingStatsDifferSignificantly() { assertThat( TimingStatsReporter.differSignificantly( - new TimingStats(JOB_ID, 10, 10.0, 10.0, 1.0, 10.0), new TimingStats(JOB_ID, 10, 10.0, 10.0, 1.0, 10.0)), + createTimingStats(JOB_ID, 10, 10.0, 10.0, 1.0, 10.0), createTimingStats(JOB_ID, 10, 10.0, 10.0, 1.0, 10.0)), is(false)); assertThat( TimingStatsReporter.differSignificantly( - new TimingStats(JOB_ID, 10, 10.0, 10.0, 1.0, 10.0), new TimingStats(JOB_ID, 10, 10.0, 11.0, 1.0, 10.0)), + createTimingStats(JOB_ID, 10, 10.0, 10.0, 1.0, 10.0), createTimingStats(JOB_ID, 10, 10.0, 11.0, 1.0, 10.0)), is(false)); assertThat( TimingStatsReporter.differSignificantly( - new TimingStats(JOB_ID, 10, 10.0, 10.0, 1.0, 10.0), new TimingStats(JOB_ID, 10, 10.0, 12.0, 1.0, 10.0)), + createTimingStats(JOB_ID, 10, 10.0, 10.0, 1.0, 10.0), createTimingStats(JOB_ID, 10, 10.0, 12.0, 1.0, 10.0)), is(true)); } @@ -121,4 +130,51 @@ public void testValuesDifferSignificantly() { assertThat(TimingStatsReporter.differSignificantly(0.0, 1.0), is(true)); assertThat(TimingStatsReporter.differSignificantly(1.0, 0.0), is(true)); } + + private TimingStatsReporter createReporter(TimingStats timingStats) { + return new TimingStatsReporter(timingStats, bulkResultsPersister); + } + + private static TimingStats createTimingStats( + String jobId, + long bucketCount, + @Nullable Double minBucketProcessingTimeMs, + @Nullable Double maxBucketProcessingTimeMs, + @Nullable Double avgBucketProcessingTimeMs, + @Nullable Double exponentialAvgBucketProcessingTimeMs) { + return createTimingStats( + jobId, + bucketCount, + minBucketProcessingTimeMs, + maxBucketProcessingTimeMs, + avgBucketProcessingTimeMs, + 
exponentialAvgBucketProcessingTimeMs, + 0.0); + } + + private static TimingStats createTimingStats( + String jobId, + long bucketCount, + @Nullable Double minBucketProcessingTimeMs, + @Nullable Double maxBucketProcessingTimeMs, + @Nullable Double avgBucketProcessingTimeMs, + @Nullable Double exponentialAvgBucketProcessingTimeMs, + double incrementalBucketProcessingTimeMs) { + ExponentialAverageCalculationContext context = + new ExponentialAverageCalculationContext(incrementalBucketProcessingTimeMs, TIMESTAMP.plus(BUCKET_SPAN), null); + return new TimingStats( + jobId, + bucketCount, + minBucketProcessingTimeMs, + maxBucketProcessingTimeMs, + avgBucketProcessingTimeMs, + exponentialAvgBucketProcessingTimeMs, + context); + } + + private static Bucket createBucket(long processingTimeMs) { + Bucket bucket = new Bucket(JOB_ID, Date.from(TIMESTAMP), BUCKET_SPAN.getSeconds()); + bucket.setProcessingTimeMs(processingTimeMs); + return bucket; + } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessorTests.java index 1e0263185798f..586f3c039e21b 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessorTests.java @@ -134,7 +134,7 @@ public void testProcessResult_bucket() { when(bulkBuilder.persistTimingStats(any(TimingStats.class))).thenReturn(bulkBuilder); when(bulkBuilder.persistBucket(any(Bucket.class))).thenReturn(bulkBuilder); AutodetectResult result = mock(AutodetectResult.class); - Bucket bucket = mock(Bucket.class); + Bucket bucket = new Bucket(JOB_ID, new Date(), BUCKET_SPAN_MS); when(result.getBucket()).thenReturn(bucket); processorUnderTest.setDeleteInterimRequired(false); @@ -151,7 +151,7 @@ public void testProcessResult_bucket_deleteInterimRequired() { when(bulkBuilder.persistTimingStats(any(TimingStats.class))).thenReturn(bulkBuilder); when(bulkBuilder.persistBucket(any(Bucket.class))).thenReturn(bulkBuilder); AutodetectResult result = mock(AutodetectResult.class); - Bucket bucket = mock(Bucket.class); + Bucket bucket = new Bucket(JOB_ID, new Date(), BUCKET_SPAN_MS); when(result.getBucket()).thenReturn(bucket); processorUnderTest.processResult(result); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/AutodetectParamsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/AutodetectParamsTests.java index 616dd6cba5a14..2740f0ec01cb9 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/AutodetectParamsTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/AutodetectParamsTests.java @@ -7,6 +7,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.TimingStats; +import org.elasticsearch.xpack.core.ml.utils.ExponentialAverageCalculationContext; import static org.hamcrest.Matchers.equalTo; @@ -15,13 +16,17 @@ public class AutodetectParamsTests extends ESTestCase { private static final String JOB_ID = "my-job"; public void testBuilder_WithTimingStats() { - TimingStats timingStats = new TimingStats(JOB_ID, 7, 1.0, 1000.0, 666.0, 1000.0); + TimingStats 
timingStats = new TimingStats(JOB_ID, 7, 1.0, 1000.0, 666.0, 1000.0, new ExponentialAverageCalculationContext()); AutodetectParams params = new AutodetectParams.Builder(JOB_ID).setTimingStats(timingStats).build(); assertThat(params.timingStats(), equalTo(timingStats)); timingStats.updateStats(2000.0); - assertThat(timingStats, equalTo(new TimingStats(JOB_ID, 8, 1.0, 2000.0, 832.75, 1010.0))); - assertThat(params.timingStats(), equalTo(new TimingStats(JOB_ID, 7, 1.0, 1000.0, 666.0, 1000.0))); + assertThat( + timingStats, + equalTo(new TimingStats(JOB_ID, 8, 1.0, 2000.0, 832.75, 1010.0, new ExponentialAverageCalculationContext(2000.0, null, null)))); + assertThat( + params.timingStats(), + equalTo(new TimingStats(JOB_ID, 7, 1.0, 1000.0, 666.0, 1000.0, new ExponentialAverageCalculationContext()))); } public void testBuilder_WithoutTimingStats() { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/BucketTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/BucketTests.java index aa56e392de2bb..9cab6c725a71d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/BucketTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/BucketTests.java @@ -27,13 +27,15 @@ public class BucketTests extends AbstractSerializingTestCase { + private static final long MAX_BUCKET_SPAN_SEC = 100_000_000_000L; // bucket span of > 3000 years should be enough for everyone + @Override public Bucket createTestInstance() { return createTestInstance("foo"); } public Bucket createTestInstance(String jobId) { - Bucket bucket = new Bucket(jobId, randomDate(), randomNonNegativeLong()); + Bucket bucket = new Bucket(jobId, randomDate(), randomLongBetween(1, MAX_BUCKET_SPAN_SEC)); if (randomBoolean()) { bucket.setAnomalyScore(randomDouble()); } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsMonitoringDocTests.java index ebfbf8d223db9..23de6258b3fe0 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsMonitoringDocTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.TimingStats; import org.elasticsearch.xpack.core.ml.stats.ForecastStats; +import org.elasticsearch.xpack.core.ml.utils.ExponentialAverageCalculationContext; import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; import org.elasticsearch.xpack.monitoring.exporter.BaseMonitoringDocTestCase; @@ -103,7 +104,8 @@ public void testToXContent() throws IOException { final DataCounts dataCounts = new DataCounts("_job_id", 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, date3, date4, date5, date6, date7); final ForecastStats forecastStats = new ForecastStats(); - final TimingStats timingStats = new TimingStats("_job_id", 100, 10.0, 30.0, 20.0, 25.0); + final TimingStats timingStats = new TimingStats( + "_job_id", 100, 10.0, 30.0, 20.0, 25.0, new ExponentialAverageCalculationContext(50.0, null, null)); final JobStats jobStats = new JobStats( "_job", dataCounts, modelStats, forecastStats, JobState.OPENED, 
discoveryNode, "_explanation", time, timingStats); final MonitoringDoc.Node node = new MonitoringDoc.Node("_uuid", "_host", "_addr", "_ip", "_name", 1504169190855L); @@ -179,7 +181,8 @@ public void testToXContent() throws IOException { + "\"minimum_bucket_processing_time_ms\":10.0," + "\"maximum_bucket_processing_time_ms\":30.0," + "\"average_bucket_processing_time_ms\":20.0," - + "\"exponential_average_bucket_processing_time_ms\":25.0" + + "\"exponential_average_bucket_processing_time_ms\":25.0," + + "\"exponential_average_bucket_processing_time_per_hour_ms\":50.0" + "}" + "}" + "}", xContent.utf8ToString()); From fa0c54ce5655c4a245fcdbeb86ef04f3df200065 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 26 Jul 2019 14:34:09 +0200 Subject: [PATCH 42/51] Fix Test Failure in ScalingThreadPoolTests (#44898) * Due to #44894 some constellations log a deprecation warning here now * Fixed by checking for that --- .../threadpool/ScalingThreadPoolTests.java | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java b/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java index a9436053ae902..d4e6f3693b712 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java +++ b/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java @@ -48,13 +48,17 @@ public void testScalingThreadPoolConfiguration() throws InterruptedException { core = "generic".equals(threadPoolName) ? 4 : 1; // the defaults } + final int availableProcessors = Runtime.getRuntime().availableProcessors(); final int maxBasedOnNumberOfProcessors; + final int processorsUsed; if (randomBoolean()) { final int processors = randomIntBetween(1, 64); maxBasedOnNumberOfProcessors = expectedSize(threadPoolName, processors); builder.put("processors", processors); + processorsUsed = processors; } else { - maxBasedOnNumberOfProcessors = expectedSize(threadPoolName, Runtime.getRuntime().availableProcessors()); + maxBasedOnNumberOfProcessors = expectedSize(threadPoolName, availableProcessors); + processorsUsed = availableProcessors; } final int expectedMax; @@ -93,6 +97,11 @@ public void testScalingThreadPoolConfiguration() throws InterruptedException { assertThat(info.getMax(), equalTo(expectedMax)); assertThat(esThreadPoolExecutor.getMaximumPoolSize(), equalTo(expectedMax)); }); + + if (processorsUsed > availableProcessors) { + assertWarnings("setting processors to value [" + processorsUsed + + "] which is more than available processors [" + availableProcessors + "] is deprecated"); + } } private int expectedSize(final String threadPoolName, final int numberOfProcessors) { From 337ad068af7d8bc8526e8b14d7961f03a3653e65 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Fri, 26 Jul 2019 16:58:39 +0200 Subject: [PATCH 43/51] Ensure index is green in SimpleClusterStateIT.testIndicesOptions() (#44893) SimpleClusterStateIT testIndicesOptions failed in #44817 because it tries to close an index at the beginning of the test. With random index settings, it is possible that the index has a high number of shards (10) and replicas (1), which means that on CI this index can take time to be fully allocated. The close index request can fail in the case where replicas are still recovering operations. Thiscommit adds a simple ensureGreen() at the beginning of the test to be sure that all replicas are started before trying to close the index. 
closes #44817
--- .../java/org/elasticsearch/cluster/SimpleClusterStateIT.java | 1 + 1 file changed, 1 insertion(+)
diff --git a/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java b/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java index 27032b1a50c61..6c0b9fb7f80c9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java @@ -257,6 +257,7 @@ public void testIndicesOptions() throws Exception { ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().clear().setMetaData(true).setIndices("f*") .get(); assertThat(clusterStateResponse.getState().metaData().indices().size(), is(2)); + ensureGreen("fuu"); // close one index assertAcked(client().admin().indices().close(Requests.closeIndexRequest("fuu")).get());
From 5b2d1a5d3994f5c032417db7b24f67dd579ce86f Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Sat, 27 Jul 2019 00:07:12 +0900 Subject: [PATCH 44/51] Limit processors by available processors (#44894)
This commit limits the processors setting to be no more than the number of available processors.
--- .../migration/migrate_8_0/settings.asciidoc | 10 +++++++++ .../common/util/concurrent/EsExecutors.java | 22 ++++--------------- .../util/concurrent/EsExecutorsTests.java | 12 ++++++++++ .../threadpool/ScalingThreadPoolTests.java | 18 ++------------- 4 files changed, 28 insertions(+), 34 deletions(-)
diff --git a/docs/reference/migration/migrate_8_0/settings.asciidoc b/docs/reference/migration/migrate_8_0/settings.asciidoc index 0c21ae4021aa7..4eac119538dd7 100644 --- a/docs/reference/migration/migrate_8_0/settings.asciidoc +++ b/docs/reference/migration/migrate_8_0/settings.asciidoc @@ -11,3 +11,13 @@ provided automatic upgrading of these settings to their `cluster.remote` counterparts. In 8.0.0, these settings have been removed. Elasticsearch will refuse to start if you have these settings in your configuration or cluster state. + +[float] +==== `processors` can no longer exceed the available number of processors + +Previously it was possible to set the number of processors used to set the +default sizes for the thread pools to be more than the number of available +processors. As this leads to more context switches and more threads but without +an increase in the number of physical CPUs on which to schedule these additional +threads, the `processors` setting is now bounded by the number of available +processors.
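To make the new bound concrete, here is a minimal sketch of the 8.0 behavior described above (not part of the patch; it assumes a machine reporting 8 available processors, and the value 16 is made up):

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsExecutors;

public class ProcessorsBoundSketch {
    public static void main(String[] args) {
        // Assumption: Runtime.getRuntime().availableProcessors() == 8 on this host.
        Settings settings = Settings.builder().put("processors", 16).build();
        // In 7.x this logged a deprecation warning and returned 16; with this change
        // it throws IllegalArgumentException:
        //   Failed to parse value [16] for setting [processors] must be <= 8
        int processors = EsExecutors.PROCESSORS_SETTING.get(settings);
        System.out.println(processors); // not reached when the value exceeds the bound
    }
}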
diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java index 561a820d49078..1623ffdf82565 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java @@ -19,10 +19,8 @@ package org.elasticsearch.common.util.concurrent; -import org.apache.logging.log4j.LogManager; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -48,26 +46,14 @@ public class EsExecutors { - private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(EsExecutors.class)); - /** * Setting to manually set the number of available processors. This setting is used to adjust thread pool sizes per node. */ - public static final Setting PROCESSORS_SETTING = new Setting<>( + public static final Setting PROCESSORS_SETTING = Setting.intSetting( "processors", - s -> Integer.toString(Runtime.getRuntime().availableProcessors()), - s -> { - final int value = Setting.parseInt(s, 1, "processors"); - final int availableProcessors = Runtime.getRuntime().availableProcessors(); - if (value > availableProcessors) { - deprecationLogger.deprecatedAndMaybeLog( - "processors", - "setting processors to value [{}] which is more than available processors [{}] is deprecated", - value, - availableProcessors); - } - return value; - }, + Runtime.getRuntime().availableProcessors(), + 1, + Runtime.getRuntime().availableProcessors(), Property.NodeScope); /**
diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java index 0f0350c48210c..6a64a93227747 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java @@ -32,6 +32,7 @@ import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.lessThan; /** @@ -388,4 +389,15 @@ public void testGetTasks() throws InterruptedException { } } + public void testProcessorsBound() { + final int available = Runtime.getRuntime().availableProcessors(); + final int processors = randomIntBetween(available + 1, Integer.MAX_VALUE); + final Settings settings = Settings.builder().put("processors", processors).build(); + final IllegalArgumentException e = + expectThrows(IllegalArgumentException.class, () -> EsExecutors.PROCESSORS_SETTING.get(settings)); + assertThat( + e, + hasToString(containsString("Failed to parse value [" + processors + "] for setting [processors] must be <= " + available))); + } + }
diff --git a/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java b/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java index d4e6f3693b712..097d856f06289 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java +++ b/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java @@ -48,18 +48,8 @@ public void testScalingThreadPoolConfiguration() throws InterruptedException { core = "generic".equals(threadPoolName) ? 4 : 1; // the defaults } - final int availableProcessors = Runtime.getRuntime().availableProcessors(); - final int maxBasedOnNumberOfProcessors; - final int processorsUsed; - if (randomBoolean()) { - final int processors = randomIntBetween(1, 64); - maxBasedOnNumberOfProcessors = expectedSize(threadPoolName, processors); - builder.put("processors", processors); - processorsUsed = processors; - } else { - maxBasedOnNumberOfProcessors = expectedSize(threadPoolName, availableProcessors); - processorsUsed = availableProcessors; - } + final int processors = randomIntBetween(1, Runtime.getRuntime().availableProcessors()); + final int maxBasedOnNumberOfProcessors = expectedSize(threadPoolName, processors); final int expectedMax; if (maxBasedOnNumberOfProcessors < core || randomBoolean()) { @@ -98,10 +88,6 @@ public void testScalingThreadPoolConfiguration() throws InterruptedException { assertThat(esThreadPoolExecutor.getMaximumPoolSize(), equalTo(expectedMax)); }); - if (processorsUsed > availableProcessors) { - assertWarnings("setting processors to value [" + processorsUsed + - "] which is more than available processors [" + availableProcessors + "] is deprecated"); - } } private int expectedSize(final String threadPoolName, final int numberOfProcessors) {
From 8ebe466f1e0aebc5fbf0e0ff704bd064102ccfb8 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Sat, 27 Jul 2019 00:54:06 +0900 Subject: [PATCH 45/51] Fix failing scaling thread pool test
The previous commit took away the distinction between relying on the defaults versus relying on an explicit setting for processors. This commit adds it back, and adjusts the logic to account for the fact that processors can no longer exceed the number of available processors.
--- .../threadpool/ScalingThreadPoolTests.java | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java b/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java index 097d856f06289..171a1f66d29c0 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java +++ b/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java @@ -48,8 +48,17 @@ public void testScalingThreadPoolConfiguration() throws InterruptedException { core = "generic".equals(threadPoolName) ?
4 : 1; // the defaults } - final int processors = randomIntBetween(1, Runtime.getRuntime().availableProcessors()); - final int maxBasedOnNumberOfProcessors = expectedSize(threadPoolName, processors); + final int availableProcessors = Runtime.getRuntime().availableProcessors(); + final int maxBasedOnNumberOfProcessors; + final int processors; + if (randomBoolean()) { + processors = randomIntBetween(1, availableProcessors); + maxBasedOnNumberOfProcessors = expectedSize(threadPoolName, processors); + builder.put("processors", processors); + } else { + maxBasedOnNumberOfProcessors = expectedSize(threadPoolName, availableProcessors); + processors = availableProcessors; + } final int expectedMax; if (maxBasedOnNumberOfProcessors < core || randomBoolean()) {
From a76242d43d1a668b45be6c48f24a5dfde464038e Mon Sep 17 00:00:00 2001 From: Gordon Brown Date: Fri, 26 Jul 2019 09:57:31 -0600 Subject: [PATCH 46/51] Update serialization version for ILM explain filters
--- .../xpack/core/indexlifecycle/ExplainLifecycleRequest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleRequest.java index 01e70dcce0920..0a4d65d3e01df 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ExplainLifecycleRequest.java @@ -23,7 +23,7 @@ * {@link #indices(String...)} method */ public class ExplainLifecycleRequest extends ClusterInfoRequest { - private static final Version FILTERS_INTRODUCED_VERSION = Version.V_8_0_0; + private static final Version FILTERS_INTRODUCED_VERSION = Version.V_7_4_0; private boolean onlyErrors = false; private boolean onlyManaged = false;
From f603f06250ab72b229bdf45476a73c9eeb21ac46 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Fri, 26 Jul 2019 12:14:05 -0400 Subject: [PATCH 47/51] Geo: refactor geo mapper and query builder (#44884)
Refactors the indexing and query generation logic out of the mapper and query builder into separate unit-testable classes.
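In outline, the refactor splits geometry handling into pluggable stages — parsing the document source, preparing the parsed geometry for the index, and building queries from shapes — so each stage can be unit-tested in isolation. A minimal, runnable sketch of that decomposition (illustrative names and stand-in implementations, not the actual Elasticsearch API):

public class GeoRefactorSketch {
    // stage 1: document source -> intermediate geometry representation
    interface Parser<P> { P parse(String source); }
    // stage 2: intermediate representation -> form the index can consume
    interface Indexer<P, R> { R prepareForIndexing(P geometry); }

    static <P, R> R toIndexable(Parser<P> parser, Indexer<P, R> indexer, String source) {
        P geometry = parser.parse(source);           // testable on its own
        return indexer.prepareForIndexing(geometry); // testable on its own
    }

    public static void main(String[] args) {
        // stand-ins so the sketch runs: trim on parse, bracket on prepare
        String shape = toIndexable(String::trim, s -> "[" + s + "]", " POINT (30 10) ");
        System.out.println(shape); // prints: [POINT (30 10)]
    }
}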
--- .../common/geo/parsers/GeoJsonParser.java | 12 +- .../common/geo/parsers/GeoWKTParser.java | 12 +- .../common/geo/parsers/ShapeParser.java | 4 +- ....java => AbstractGeometryFieldMapper.java} | 132 ++++++-- .../index/mapper/GeoShapeFieldMapper.java | 72 ++--- .../mapper/GeoShapeIndexer.java} | 55 ++-- .../mapper/LegacyGeoShapeFieldMapper.java | 68 ++-- .../index/mapper/LegacyGeoShapeIndexer.java | 35 +++ .../index/query/GeoShapeQueryBuilder.java | 294 +----------------- .../query/LegacyGeoShapeQueryProcessor.java | 197 ++++++++++++ .../query/VectorGeoShapeQueryProcessor.java | 171 ++++++++++ .../elasticsearch/indices/IndicesModule.java | 5 +- .../common/geo/BaseGeoParsingTestCase.java | 3 +- .../common/geo/GeoJsonShapeParserTests.java | 3 +- .../common/geo/GeoWKTShapeParserTests.java | 3 +- .../common/geo/GeometryIOTests.java | 2 +- .../common/geo/GeometryIndexerTests.java | 7 +- .../common/geo/ShapeBuilderTests.java | 3 +- .../index/mapper/ExternalMapper.java | 8 +- .../mapper/GeoShapeFieldMapperTests.java | 3 +- 20 files changed, 648 insertions(+), 441 deletions(-) rename server/src/main/java/org/elasticsearch/index/mapper/{BaseGeoShapeFieldMapper.java => AbstractGeometryFieldMapper.java} (73%) rename server/src/main/java/org/elasticsearch/{common/geo/GeometryIndexer.java => index/mapper/GeoShapeIndexer.java} (96%) create mode 100644 server/src/main/java/org/elasticsearch/index/mapper/LegacyGeoShapeIndexer.java create mode 100644 server/src/main/java/org/elasticsearch/index/query/LegacyGeoShapeQueryProcessor.java create mode 100644 server/src/main/java/org/elasticsearch/index/query/VectorGeoShapeQueryProcessor.java diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java index b008786ed9211..8ab6a44f26e8e 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java @@ -29,7 +29,7 @@ import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentSubParser; -import org.elasticsearch.index.mapper.BaseGeoShapeFieldMapper; +import org.elasticsearch.index.mapper.AbstractGeometryFieldMapper; import org.locationtech.jts.geom.Coordinate; import java.io.IOException; @@ -42,7 +42,7 @@ * complies with geojson specification: https://tools.ietf.org/html/rfc7946 */ abstract class GeoJsonParser { - protected static ShapeBuilder parse(XContentParser parser, BaseGeoShapeFieldMapper shapeMapper) + protected static ShapeBuilder parse(XContentParser parser, AbstractGeometryFieldMapper shapeMapper) throws IOException { GeoShapeType shapeType = null; DistanceUnit.Distance radius = null; @@ -50,13 +50,13 @@ protected static ShapeBuilder parse(XContentParser parser, BaseGeoShapeFieldMapp GeometryCollectionBuilder geometryCollections = null; Orientation orientation = (shapeMapper == null) - ? BaseGeoShapeFieldMapper.Defaults.ORIENTATION.value() + ? AbstractGeometryFieldMapper.Defaults.ORIENTATION.value() : shapeMapper.orientation(); Explicit coerce = (shapeMapper == null) - ? BaseGeoShapeFieldMapper.Defaults.COERCE + ? AbstractGeometryFieldMapper.Defaults.COERCE : shapeMapper.coerce(); Explicit ignoreZValue = (shapeMapper == null) - ? BaseGeoShapeFieldMapper.Defaults.IGNORE_Z_VALUE + ? 
AbstractGeometryFieldMapper.Defaults.IGNORE_Z_VALUE : shapeMapper.ignoreZValue(); String malformedException = null; @@ -208,7 +208,7 @@ private static Coordinate parseCoordinate(XContentParser parser, boolean ignoreZ * @return Geometry[] geometries of the GeometryCollection * @throws IOException Thrown if an error occurs while reading from the XContentParser */ - static GeometryCollectionBuilder parseGeometries(XContentParser parser, BaseGeoShapeFieldMapper mapper) throws + static GeometryCollectionBuilder parseGeometries(XContentParser parser, AbstractGeometryFieldMapper mapper) throws IOException { if (parser.currentToken() != XContentParser.Token.START_ARRAY) { throw new ElasticsearchParseException("geometries must be an array of geojson objects"); diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java index 2cffa417246fd..69c84a52a2db4 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java @@ -34,7 +34,7 @@ import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.mapper.BaseGeoShapeFieldMapper; +import org.elasticsearch.index.mapper.AbstractGeometryFieldMapper; import org.locationtech.jts.geom.Coordinate; import java.io.IOException; @@ -63,7 +63,7 @@ public class GeoWKTParser { // no instance private GeoWKTParser() {} - public static ShapeBuilder parse(XContentParser parser, final BaseGeoShapeFieldMapper shapeMapper) + public static ShapeBuilder parse(XContentParser parser, final AbstractGeometryFieldMapper shapeMapper) throws IOException, ElasticsearchParseException { return parseExpectedType(parser, null, shapeMapper); } @@ -75,12 +75,12 @@ public static ShapeBuilder parseExpectedType(XContentParser parser, final GeoSha /** throws an exception if the parsed geometry type does not match the expected shape type */ public static ShapeBuilder parseExpectedType(XContentParser parser, final GeoShapeType shapeType, - final BaseGeoShapeFieldMapper shapeMapper) + final AbstractGeometryFieldMapper shapeMapper) throws IOException, ElasticsearchParseException { try (StringReader reader = new StringReader(parser.text())) { - Explicit ignoreZValue = (shapeMapper == null) ? BaseGeoShapeFieldMapper.Defaults.IGNORE_Z_VALUE : + Explicit ignoreZValue = (shapeMapper == null) ? AbstractGeometryFieldMapper.Defaults.IGNORE_Z_VALUE : shapeMapper.ignoreZValue(); - Explicit coerce = (shapeMapper == null) ? BaseGeoShapeFieldMapper.Defaults.COERCE : shapeMapper.coerce(); + Explicit coerce = (shapeMapper == null) ? 
AbstractGeometryFieldMapper.Defaults.COERCE : shapeMapper.coerce(); // setup the tokenizer; configured to read words w/o numbers StreamTokenizer tokenizer = new StreamTokenizer(reader); tokenizer.resetSyntax(); @@ -258,7 +258,7 @@ private static PolygonBuilder parsePolygon(StreamTokenizer stream, final boolean return null; } PolygonBuilder builder = new PolygonBuilder(parseLinearRing(stream, ignoreZValue, coerce), - BaseGeoShapeFieldMapper.Defaults.ORIENTATION.value()); + AbstractGeometryFieldMapper.Defaults.ORIENTATION.value()); while (nextCloserOrComma(stream).equals(COMMA)) { builder.hole(parseLinearRing(stream, ignoreZValue, coerce)); } diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java index 4a976d19b2347..f15b2f2777f91 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java @@ -26,7 +26,7 @@ import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.support.MapXContentParser; -import org.elasticsearch.index.mapper.BaseGeoShapeFieldMapper; +import org.elasticsearch.index.mapper.AbstractGeometryFieldMapper; import java.io.IOException; import java.util.Collections; @@ -50,7 +50,7 @@ public interface ShapeParser { * if the parsers current token has been null * @throws IOException if the input could not be read */ - static ShapeBuilder parse(XContentParser parser, BaseGeoShapeFieldMapper shapeMapper) throws IOException { + static ShapeBuilder parse(XContentParser parser, AbstractGeometryFieldMapper shapeMapper) throws IOException { if (parser.currentToken() == XContentParser.Token.VALUE_NULL) { return null; } if (parser.currentToken() == XContentParser.Token.START_OBJECT) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BaseGeoShapeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java similarity index 73% rename from server/src/main/java/org/elasticsearch/index/mapper/BaseGeoShapeFieldMapper.java rename to server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java index 20151f301d791..e47060c87e539 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BaseGeoShapeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java @@ -26,17 +26,22 @@ import org.elasticsearch.Version; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.geo.ShapeRelation; +import org.elasticsearch.common.geo.SpatialStrategy; import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.geo.builders.ShapeBuilder.Orientation; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.geo.geometry.Geometry; import org.elasticsearch.index.mapper.LegacyGeoShapeFieldMapper.DeprecatedParameters; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardException; import java.io.IOException; +import java.text.ParseException; import java.util.Iterator; import 
java.util.List; import java.util.Map; @@ -47,8 +52,8 @@ /** * Base class for {@link GeoShapeFieldMapper} and {@link LegacyGeoShapeFieldMapper} */ -public abstract class BaseGeoShapeFieldMapper extends FieldMapper { - public static final String CONTENT_TYPE = "geo_shape"; +public abstract class AbstractGeometryFieldMapper extends FieldMapper { + public static class Names { public static final ParseField ORIENTATION = new ParseField("orientation"); @@ -62,7 +67,36 @@ public static class Defaults { public static final Explicit IGNORE_Z_VALUE = new Explicit<>(true, false); } - public abstract static class Builder + + /** + * Interface representing a preprocessor in the geo-shape indexing pipeline + */ + public interface Indexer { + + Processed prepareForIndexing(Parsed geometry); + + Class processedClass(); + + } + + /** + * Interface representing the parser in the geo-shape indexing pipeline + */ + public interface Parser { + + Parsed parse(XContentParser parser, AbstractGeometryFieldMapper mapper) throws IOException, ParseException; + + } + + /** + * Interface representing a query builder that generates a query from the given shape + */ + public interface QueryProcessor { + + Query process(Geometry shape, String fieldName, SpatialStrategy strategy, ShapeRelation relation, QueryShardContext context); + } + + public abstract static class Builder extends FieldMapper.Builder { protected Boolean coerce; protected Boolean ignoreMalformed; @@ -152,7 +186,7 @@ protected void setupFieldType(BuilderContext context) { throw new IllegalArgumentException("name cannot be empty string"); } - BaseGeoShapeFieldType ft = (BaseGeoShapeFieldType)fieldType(); + AbstractGeometryFieldType ft = (AbstractGeometryFieldType)fieldType(); ft.setOrientation(orientation().value()); } } @@ -218,10 +252,16 @@ public Mapper.Builder parse(String name, Map node, ParserContext } } - public abstract static class BaseGeoShapeFieldType extends MappedFieldType { + public abstract static class AbstractGeometryFieldType extends MappedFieldType { protected Orientation orientation = Defaults.ORIENTATION.value(); - protected BaseGeoShapeFieldType() { + protected Indexer geometryIndexer; + + protected Parser geometryParser; + + protected QueryProcessor geometryQueryBuilder; + + protected AbstractGeometryFieldType() { setIndexOptions(IndexOptions.DOCS); setTokenized(false); setStored(false); @@ -229,7 +269,7 @@ protected BaseGeoShapeFieldType() { setOmitNorms(true); } - protected BaseGeoShapeFieldType(BaseGeoShapeFieldType ref) { + protected AbstractGeometryFieldType(AbstractGeometryFieldType ref) { super(ref); this.orientation = ref.orientation; } @@ -237,7 +277,7 @@ protected BaseGeoShapeFieldType(BaseGeoShapeFieldType ref) { @Override public boolean equals(Object o) { if (!super.equals(o)) return false; - BaseGeoShapeFieldType that = (BaseGeoShapeFieldType) o; + AbstractGeometryFieldType that = (AbstractGeometryFieldType) o; return orientation == that.orientation; } @@ -246,16 +286,6 @@ public int hashCode() { return Objects.hash(super.hashCode(), orientation); } - @Override - public String typeName() { - return CONTENT_TYPE; - } - - @Override - public void checkCompatibility(MappedFieldType fieldType, List conflicts) { - super.checkCompatibility(fieldType, conflicts); - } - public Orientation orientation() { return this.orientation; } public void setOrientation(Orientation orientation) { @@ -272,16 +302,40 @@ public Query existsQuery(QueryShardContext context) { public Query termQuery(Object value, QueryShardContext context) { throw new
QueryShardException(context, "Geo fields do not support exact searching, use dedicated geo queries instead"); } + + public void setGeometryIndexer(Indexer geometryIndexer) { + this.geometryIndexer = geometryIndexer; + } + + protected Indexer geometryIndexer() { + return geometryIndexer; + } + + public void setGeometryParser(Parser geometryParser) { + this.geometryParser = geometryParser; + } + + protected Parser geometryParser() { + return geometryParser; + } + + public void setGeometryQueryBuilder(QueryProcessor geometryQueryBuilder) { + this.geometryQueryBuilder = geometryQueryBuilder; + } + + public QueryProcessor geometryQueryBuilder() { + return geometryQueryBuilder; + } } protected Explicit coerce; protected Explicit ignoreMalformed; protected Explicit ignoreZValue; - protected BaseGeoShapeFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, - Explicit ignoreMalformed, Explicit coerce, - Explicit ignoreZValue, Settings indexSettings, - MultiFields multiFields, CopyTo copyTo) { + protected AbstractGeometryFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, + Explicit ignoreMalformed, Explicit coerce, + Explicit ignoreZValue, Settings indexSettings, + MultiFields multiFields, CopyTo copyTo) { super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo); this.coerce = coerce; this.ignoreMalformed = ignoreMalformed; @@ -291,7 +345,7 @@ protected BaseGeoShapeFieldMapper(String simpleName, MappedFieldType fieldType, @Override protected void doMerge(Mapper mergeWith) { super.doMerge(mergeWith); - BaseGeoShapeFieldMapper gsfm = (BaseGeoShapeFieldMapper)mergeWith; + AbstractGeometryFieldMapper gsfm = (AbstractGeometryFieldMapper)mergeWith; if (gsfm.coerce.explicit()) { this.coerce = gsfm.coerce; } @@ -310,7 +364,7 @@ protected void parseCreateField(ParseContext context, List field @Override protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException { builder.field("type", contentType()); - BaseGeoShapeFieldType ft = (BaseGeoShapeFieldType)fieldType(); + AbstractGeometryFieldType ft = (AbstractGeometryFieldType)fieldType(); if (includeDefaults || ft.orientation() != Defaults.ORIENTATION.value()) { builder.field(Names.ORIENTATION.getPreferredName(), ft.orientation()); } @@ -338,11 +392,35 @@ public Explicit ignoreZValue() { } public Orientation orientation() { - return ((BaseGeoShapeFieldType)fieldType).orientation(); + return ((AbstractGeometryFieldType)fieldType).orientation(); } + protected abstract void indexShape(ParseContext context, Processed shape); + + /** parsing logic for geometry indexing */ @Override - protected String contentType() { - return CONTENT_TYPE; + public void parse(ParseContext context) throws IOException { + AbstractGeometryFieldType fieldType = (AbstractGeometryFieldType)fieldType(); + + @SuppressWarnings("unchecked") Indexer geometryIndexer = fieldType.geometryIndexer(); + @SuppressWarnings("unchecked") Parser geometryParser = fieldType.geometryParser(); + try { + Processed shape = context.parseExternalValue(geometryIndexer.processedClass()); + if (shape == null) { + Parsed geometry = geometryParser.parse(context.parser(), this); + if (geometry == null) { + return; + } + shape = geometryIndexer.prepareForIndexing(geometry); + } + indexShape(context, shape); + } catch (Exception e) { + if (ignoreMalformed.value() == false) { + throw new MapperParsingException("failed to parse field [{}] of type [{}]", e, 
fieldType().name(), + fieldType().typeName()); + } + context.addIgnoredField(fieldType().name()); + } } + } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java index 2ce1d5328f3b9..7bd5eba115adc 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java @@ -24,7 +24,6 @@ import org.apache.lucene.geo.Polygon; import org.apache.lucene.index.IndexableField; import org.elasticsearch.common.Explicit; -import org.elasticsearch.common.geo.GeometryIndexer; import org.elasticsearch.common.geo.GeometryParser; import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.settings.Settings; @@ -37,8 +36,8 @@ import org.elasticsearch.geo.geometry.MultiPoint; import org.elasticsearch.geo.geometry.MultiPolygon; import org.elasticsearch.geo.geometry.Point; +import org.elasticsearch.index.query.VectorGeoShapeQueryProcessor; -import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -62,9 +61,10 @@ *

* "field" : "POLYGON ((100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0, 100.0 0.0)) */ -public class GeoShapeFieldMapper extends BaseGeoShapeFieldMapper { +public class GeoShapeFieldMapper extends AbstractGeometryFieldMapper { + public static final String CONTENT_TYPE = "geo_shape"; - public static class Builder extends BaseGeoShapeFieldMapper.Builder { + public static class Builder extends AbstractGeometryFieldMapper.Builder { public Builder(String name) { super (name, new GeoShapeFieldType(), new GeoShapeFieldType()); } @@ -75,9 +75,21 @@ public GeoShapeFieldMapper build(BuilderContext context) { return new GeoShapeFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), ignoreZValue(), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); } + + @Override + protected void setupFieldType(BuilderContext context) { + super.setupFieldType(context); + + GeometryParser geometryParser = new GeometryParser(orientation == ShapeBuilder.Orientation.RIGHT, coerce(context).value(), + ignoreZValue().value()); + + ((GeoShapeFieldType)fieldType()).setGeometryIndexer(new GeoShapeIndexer(orientation == ShapeBuilder.Orientation.RIGHT)); + ((GeoShapeFieldType)fieldType()).setGeometryParser( (parser, mapper) -> geometryParser.parse(parser)); + ((GeoShapeFieldType)fieldType()).setGeometryQueryBuilder(new VectorGeoShapeQueryProcessor()); + } } - public static final class GeoShapeFieldType extends BaseGeoShapeFieldType { + public static final class GeoShapeFieldType extends AbstractGeometryFieldType { public GeoShapeFieldType() { super(); } @@ -90,10 +102,17 @@ protected GeoShapeFieldType(GeoShapeFieldType ref) { public GeoShapeFieldType clone() { return new GeoShapeFieldType(this); } - } - private final GeometryParser geometryParser; - private final GeometryIndexer geometryIndexer; + @Override + public String typeName() { + return CONTENT_TYPE; + } + + @Override + protected Indexer geometryIndexer() { + return new GeoShapeIndexer(orientation == ShapeBuilder.Orientation.RIGHT); + } + } public GeoShapeFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Explicit ignoreMalformed, Explicit coerce, @@ -101,8 +120,6 @@ public GeoShapeFieldMapper(String simpleName, MappedFieldType fieldType, MappedF MultiFields multiFields, CopyTo copyTo) { super(simpleName, fieldType, defaultFieldType, ignoreMalformed, coerce, ignoreZValue, indexSettings, multiFields, copyTo); - geometryParser = new GeometryParser(orientation() == ShapeBuilder.Orientation.RIGHT, coerce().value(), ignoreZValue.value()); - geometryIndexer = new GeometryIndexer(true); } @Override @@ -110,35 +127,9 @@ public GeoShapeFieldType fieldType() { return (GeoShapeFieldType) super.fieldType(); } - /** parsing logic for {@link LatLonShape} indexing */ @Override - public void parse(ParseContext context) throws IOException { - try { - - Object shape = context.parseExternalValue(Object.class); - if (shape == null) { - Geometry geometry = geometryParser.parse(context.parser()); - if (geometry == null) { - return; - } - shape = geometryIndexer.prepareForIndexing(geometry); - } - indexShape(context, shape); - } catch (Exception e) { - if (ignoreMalformed.value() == false) { - throw new MapperParsingException("failed to parse field [{}] of type [{}]", e, fieldType().name(), - fieldType().typeName()); - } - context.addIgnoredField(fieldType().name()); - } - } - - private void indexShape(ParseContext context, Object luceneShape) { - if (luceneShape instanceof Geometry) { - ((Geometry) 
luceneShape).visit(new LuceneGeometryIndexer(context)); - } else { - throw new IllegalArgumentException("invalid shape type found [" + luceneShape.getClass() + "] while indexing shape"); - } + protected void indexShape(ParseContext context, Geometry luceneShape) { + luceneShape.visit(new LuceneGeometryIndexer(context)); } private class LuceneGeometryIndexer implements GeometryVisitor { @@ -232,4 +223,9 @@ private void indexFields(ParseContext context, Field[] fields) { context.doc().add(f); } } + + @Override + protected String contentType() { + return CONTENT_TYPE; + } } diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeometryIndexer.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeIndexer.java similarity index 96% rename from server/src/main/java/org/elasticsearch/common/geo/GeometryIndexer.java rename to server/src/main/java/org/elasticsearch/index/mapper/GeoShapeIndexer.java index 6d6270e49bbfb..5f1742c99dbcd 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeometryIndexer.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeIndexer.java @@ -18,7 +18,7 @@ */ -package org.elasticsearch.common.geo; +package org.elasticsearch.index.mapper; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.geo.geometry.Circle; @@ -52,7 +52,7 @@ /** * Utility class that converts geometries into Lucene-compatible form */ -public final class GeometryIndexer { +public final class GeoShapeIndexer implements AbstractGeometryFieldMapper.Indexer { private static final double DATELINE = 180; @@ -60,7 +60,7 @@ public final class GeometryIndexer { private final boolean orientation; - public GeometryIndexer(boolean orientation) { + public GeoShapeIndexer(boolean orientation) { this.orientation = orientation; } @@ -176,6 +176,11 @@ public Geometry visit(Rectangle rectangle) { }); } + @Override + public Class processedClass() { + return Geometry.class; + } + /** * Calculate the intersection of a line segment and a vertical dateline. * @@ -666,10 +671,8 @@ private static Edge[] concat(int component, boolean direction, Point[] points, f * Array of edges will be ordered asc by the y-coordinate of the * intersections of edges. 
* - * @param dateline - * x-coordinate of the dateline - * @param edges - * set of edges that may intersect with the dateline + * @param dateline x-coordinate of the dateline + * @param edges set of edges that may intersect with the dateline * @return number of intersecting edges */ protected static int intersections(double dateline, Edge[] edges) { @@ -697,10 +700,10 @@ private static Edge[] edges(Edge[] edges, int numHoles, List> comp for (int i = 0; i < edges.length; i++) { if (edges[i].component >= 0) { - double[] partitionPoint = new double[3]; - int length = component(edges[i], -(components.size()+numHoles+1), mainEdges, partitionPoint); + double[] partitionPoint = new double[3]; + int length = component(edges[i], -(components.size() + numHoles + 1), mainEdges, partitionPoint); List component = new ArrayList<>(); - component.add(coordinates(edges[i], new Point[length+1], partitionPoint)); + component.add(coordinates(edges[i], new Point[length + 1], partitionPoint)); components.add(component); } } @@ -781,22 +784,22 @@ private static void assign(Edge[] holes, Point[][] points, int numHoles, Edge[] * This method sets the component id of all edges in a ring to a given id and shifts the * coordinates of this component according to the dateline * - * @param edge An arbitrary edge of the component - * @param id id to apply to the component + * @param edge An arbitrary edge of the component + * @param id id to apply to the component * @param edges a list of edges to which all edges of the component will be added (could be null) * @return number of edges that belong to this component */ private static int component(final Edge edge, final int id, final ArrayList edges, double[] partitionPoint) { // find a coordinate that is not part of the dateline Edge any = edge; - while(any.coordinate.getLon() == +DATELINE || any.coordinate.getLon() == -DATELINE) { - if((any = any.next) == edge) { + while (any.coordinate.getLon() == +DATELINE || any.coordinate.getLon() == -DATELINE) { + if ((any = any.next) == edge) { break; } } double shiftOffset = any.coordinate.getLon() > DATELINE ? DATELINE : (any.coordinate.getLon() < -DATELINE ? 
-DATELINE : 0); - + // run along the border of the component, collect the // edges, shift them according to the dateline and // update the component id @@ -847,14 +850,15 @@ private static int component(final Edge edge, final int id, final ArrayList buildPoints(List> components) { - List result = new ArrayList<>(components.size()); + private static List buildPoints(List> components) { + List result = new ArrayList<>(components.size()); for (int i = 0; i < components.size(); i++) { List component = components.get(i); result.add(buildPolygon(component)); @@ -885,7 +889,7 @@ private static List buildPoints(List> components) { } private static Polygon buildPolygon(List polygon) { - List holes; + List holes; Point[] shell = polygon.get(0); if (polygon.size() > 1) { holes = new ArrayList<>(polygon.size() - 1); @@ -899,7 +903,7 @@ private static Polygon buildPolygon(List polygon) { x[c] = normalizeLon(coords[c].getLon()); y[c] = normalizeLat(coords[c].getLat()); } - holes.add(new org.elasticsearch.geo.geometry.LinearRing(y, x)); + holes.add(new LinearRing(y, x)); } } else { holes = Collections.emptyList(); @@ -924,11 +928,12 @@ private static Point[][] holes(Edge[] holes, int numHoles) { final Point[][] points = new Point[numHoles][]; for (int i = 0; i < numHoles; i++) { - double[] partitionPoint = new double[3]; - int length = component(holes[i], -(i+1), null, partitionPoint); // mark as visited by inverting the sign - points[i] = coordinates(holes[i], new Point[length+1], partitionPoint); + double[] partitionPoint = new double[3]; + int length = component(holes[i], -(i + 1), null, partitionPoint); // mark as visited by inverting the sign + points[i] = coordinates(holes[i], new Point[length + 1], partitionPoint); } return points; } + } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/LegacyGeoShapeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/LegacyGeoShapeFieldMapper.java index c4996eab901dd..47adcbeb97237 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/LegacyGeoShapeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/LegacyGeoShapeFieldMapper.java @@ -46,6 +46,7 @@ import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.index.query.LegacyGeoShapeQueryProcessor; import org.locationtech.spatial4j.shape.Point; import org.locationtech.spatial4j.shape.Shape; import org.locationtech.spatial4j.shape.jts.JtsGeometry; @@ -79,7 +80,7 @@ * @deprecated use {@link GeoShapeFieldMapper} */ @Deprecated -public class LegacyGeoShapeFieldMapper extends BaseGeoShapeFieldMapper { +public class LegacyGeoShapeFieldMapper extends AbstractGeometryFieldMapper, Shape> { public static final String CONTENT_TYPE = "geo_shape"; @@ -183,7 +184,8 @@ private static void checkPrefixTreeSupport(String fieldName) { private static final Logger logger = LogManager.getLogger(LegacyGeoShapeFieldMapper.class); private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(logger); - public static class Builder extends BaseGeoShapeFieldMapper.Builder { + public static class Builder extends AbstractGeometryFieldMapper.Builder { DeprecatedParameters deprecatedParameters; @@ -268,6 +270,10 @@ private void setupPrefixTrees() { protected void setupFieldType(BuilderContext context) { super.setupFieldType(context); + fieldType().setGeometryIndexer(new LegacyGeoShapeIndexer()); + 
fieldType().setGeometryParser(ShapeParser::parse); + fieldType().setGeometryQueryBuilder(new LegacyGeoShapeQueryProcessor(fieldType())); + // field mapper handles this at build time // but prefix tree strategies require a name, so throw a similar exception if (fieldType().name().isEmpty()) { @@ -297,7 +303,7 @@ public LegacyGeoShapeFieldMapper build(BuilderContext context) { } } - public static final class GeoShapeFieldType extends BaseGeoShapeFieldType { + public static final class GeoShapeFieldType extends AbstractGeometryFieldType { private String tree = DeprecatedParameters.Defaults.TREE; private SpatialStrategy strategy = DeprecatedParameters.Defaults.STRATEGY; @@ -355,6 +361,11 @@ public int hashCode() { defaultDistanceErrorPct); } + @Override + public String typeName() { + return CONTENT_TYPE; + } + @Override public void checkCompatibility(MappedFieldType fieldType, List conflicts) { super.checkCompatibility(fieldType, conflicts); @@ -477,42 +488,26 @@ public GeoShapeFieldType fieldType() { } @Override - public void parse(ParseContext context) throws IOException { - try { - Shape shape = context.parseExternalValue(Shape.class); - if (shape == null) { - ShapeBuilder shapeBuilder = ShapeParser.parse(context.parser(), this); - if (shapeBuilder == null) { - return; - } - shape = shapeBuilder.buildS4J(); - } - if (fieldType().pointsOnly() == true) { - // index configured for pointsOnly - if (shape instanceof XShapeCollection && XShapeCollection.class.cast(shape).pointsOnly()) { - // MULTIPOINT data: index each point separately - List shapes = ((XShapeCollection) shape).getShapes(); - for (Shape s : shapes) { - indexShape(context, s); - } - return; - } else if (shape instanceof Point == false) { - throw new MapperParsingException("[{" + fieldType().name() + "}] is configured for points only but a " - + ((shape instanceof JtsGeometry) ? ((JtsGeometry)shape).getGeom().getGeometryType() : shape.getClass()) - + " was found"); + protected void indexShape(ParseContext context, Shape shape) { + if (fieldType().pointsOnly() == true) { + // index configured for pointsOnly + if (shape instanceof XShapeCollection && XShapeCollection.class.cast(shape).pointsOnly()) { + // MULTIPOINT data: index each point separately + @SuppressWarnings("unchecked") List shapes = ((XShapeCollection) shape).getShapes(); + for (Shape s : shapes) { + doIndexShape(context, s); } + return; + } else if (shape instanceof Point == false) { + throw new MapperParsingException("[{" + fieldType().name() + "}] is configured for points only but a " + + ((shape instanceof JtsGeometry) ? 
((JtsGeometry)shape).getGeom().getGeometryType() : shape.getClass()) + + " was found"); } - indexShape(context, shape); - } catch (Exception e) { - if (ignoreMalformed.value() == false) { - throw new MapperParsingException("failed to parse field [{}] of type [{}]", e, fieldType().name(), - fieldType().typeName()); - } - context.addIgnoredField(fieldType.name()); } + doIndexShape(context, shape); } - private void indexShape(ParseContext context, Shape shape) { + private void doIndexShape(ParseContext context, Shape shape) { List fields = new ArrayList<>(Arrays.asList(fieldType().defaultPrefixTreeStrategy().createIndexableFields(shape))); createFieldNamesField(context, fields); for (IndexableField field : fields) { @@ -571,4 +566,9 @@ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, } } } + + @Override + protected String contentType() { + return CONTENT_TYPE; + } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/LegacyGeoShapeIndexer.java b/server/src/main/java/org/elasticsearch/index/mapper/LegacyGeoShapeIndexer.java new file mode 100644 index 0000000000000..80e3a505f5c35 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/LegacyGeoShapeIndexer.java @@ -0,0 +1,35 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.mapper; + +import org.elasticsearch.common.geo.builders.ShapeBuilder; +import org.locationtech.spatial4j.shape.Shape; + +public class LegacyGeoShapeIndexer implements AbstractGeometryFieldMapper.Indexer, Shape> { + @Override + public Shape prepareForIndexing(ShapeBuilder shapeBuilder) { + return shapeBuilder.buildS4J(); + } + + @Override + public Class processedClass() { + return Shape.class; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java index 57bdb4446beac..7f54f8d261a0b 100644 --- a/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java @@ -20,33 +20,13 @@ package org.elasticsearch.index.query; import org.apache.logging.log4j.LogManager; -import org.apache.lucene.document.LatLonShape; -import org.apache.lucene.geo.Line; -import org.apache.lucene.geo.Polygon; -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.spatial.prefix.PrefixTreeStrategy; -import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy; -import org.apache.lucene.spatial.query.SpatialArgs; -import org.apache.lucene.spatial.query.SpatialOperation; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.geo.GeoShapeType; -import org.elasticsearch.common.geo.GeometryIndexer; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.geo.SpatialStrategy; -import org.elasticsearch.common.geo.builders.EnvelopeBuilder; -import org.elasticsearch.common.geo.builders.GeometryCollectionBuilder; -import org.elasticsearch.common.geo.builders.LineStringBuilder; -import org.elasticsearch.common.geo.builders.MultiLineStringBuilder; -import org.elasticsearch.common.geo.builders.MultiPointBuilder; -import org.elasticsearch.common.geo.builders.MultiPolygonBuilder; -import org.elasticsearch.common.geo.builders.PointBuilder; -import org.elasticsearch.common.geo.builders.PolygonBuilder; import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.geo.parsers.ShapeParser; import org.elasticsearch.common.io.stream.StreamInput; @@ -54,32 +34,17 @@ import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.geo.geometry.Circle; import org.elasticsearch.geo.geometry.Geometry; -import org.elasticsearch.geo.geometry.GeometryCollection; -import org.elasticsearch.geo.geometry.GeometryVisitor; -import org.elasticsearch.geo.geometry.LinearRing; -import org.elasticsearch.geo.geometry.MultiLine; -import org.elasticsearch.geo.geometry.MultiPoint; -import org.elasticsearch.geo.geometry.MultiPolygon; -import org.elasticsearch.geo.geometry.Point; -import org.elasticsearch.geo.geometry.Rectangle; -import org.elasticsearch.index.mapper.BaseGeoShapeFieldMapper; +import org.elasticsearch.index.mapper.AbstractGeometryFieldMapper; import org.elasticsearch.index.mapper.GeoShapeFieldMapper; -import org.elasticsearch.index.mapper.LegacyGeoShapeFieldMapper; import 
org.elasticsearch.index.mapper.MappedFieldType; -import org.locationtech.jts.geom.Coordinate; -import org.locationtech.spatial4j.shape.Shape; import java.io.IOException; -import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Objects; import java.util.function.Supplier; -import static org.elasticsearch.index.mapper.GeoShapeFieldMapper.toLucenePolygon; - /** * Derived {@link AbstractGeometryQueryBuilder} that builds a lat, lon GeoShape Query */ @@ -217,12 +182,12 @@ public SpatialStrategy strategy() { @Override protected List validContentTypes() { - return Arrays.asList(BaseGeoShapeFieldMapper.CONTENT_TYPE); + return Arrays.asList(GeoShapeFieldMapper.CONTENT_TYPE); } @Override public String queryFieldType() { - return BaseGeoShapeFieldMapper.CONTENT_TYPE; + return GeoShapeFieldMapper.CONTENT_TYPE; } @Override @@ -245,260 +210,13 @@ protected GeoShapeQueryBuilder newShapeQueryBuilder(String fieldName, Supplier geometryToShapeBuilder(Geometry geometry) { - ShapeBuilder shapeBuilder = geometry.visit(new GeometryVisitor<>() { - @Override - public ShapeBuilder visit(Circle circle) { - throw new UnsupportedOperationException("circle is not supported"); - } - - @Override - public ShapeBuilder visit(GeometryCollection collection) { - GeometryCollectionBuilder shapes = new GeometryCollectionBuilder(); - for (Geometry geometry : collection) { - shapes.shape(geometry.visit(this)); - } - return shapes; - } - - @Override - public ShapeBuilder visit(org.elasticsearch.geo.geometry.Line line) { - List coordinates = new ArrayList<>(); - for (int i = 0; i < line.length(); i++) { - coordinates.add(new Coordinate(line.getLon(i), line.getLat(i), line.getAlt(i))); - } - return new LineStringBuilder(coordinates); - } - - @Override - public ShapeBuilder visit(LinearRing ring) { - throw new UnsupportedOperationException("circle is not supported"); - } - - @Override - public ShapeBuilder visit(MultiLine multiLine) { - MultiLineStringBuilder lines = new MultiLineStringBuilder(); - for (int i = 0; i < multiLine.size(); i++) { - lines.linestring((LineStringBuilder) visit(multiLine.get(i))); - } - return lines; - } - - @Override - public ShapeBuilder visit(MultiPoint multiPoint) { - List coordinates = new ArrayList<>(); - for (int i = 0; i < multiPoint.size(); i++) { - Point p = multiPoint.get(i); - coordinates.add(new Coordinate(p.getLon(), p.getLat(), p.getAlt())); - } - return new MultiPointBuilder(coordinates); - } - - @Override - public ShapeBuilder visit(MultiPolygon multiPolygon) { - MultiPolygonBuilder polygons = new MultiPolygonBuilder(); - for (int i = 0; i < multiPolygon.size(); i++) { - polygons.polygon((PolygonBuilder) visit(multiPolygon.get(i))); - } - return polygons; - } - - @Override - public ShapeBuilder visit(Point point) { - return new PointBuilder(point.getLon(), point.getLat()); - } - - @Override - public ShapeBuilder visit(org.elasticsearch.geo.geometry.Polygon polygon) { - PolygonBuilder polygonBuilder = - new PolygonBuilder((LineStringBuilder) visit((org.elasticsearch.geo.geometry.Line) polygon.getPolygon()), - ShapeBuilder.Orientation.RIGHT, false); - for (int i = 0; i < polygon.getNumberOfHoles(); i++) { - polygonBuilder.hole((LineStringBuilder) visit((org.elasticsearch.geo.geometry.Line) polygon.getHole(i))); - } - return polygonBuilder; - } - - @Override - public ShapeBuilder visit(Rectangle rectangle) { - return new EnvelopeBuilder(new Coordinate(rectangle.getMinLon(), rectangle.getMaxLat()), - new Coordinate(rectangle.getMaxLon(), 
rectangle.getMinLat())); - } - }); - return shapeBuilder; - } - - private class ShapeVisitor implements GeometryVisitor { - QueryShardContext context; - MappedFieldType fieldType; - - ShapeVisitor(QueryShardContext context) { - this.context = context; - this.fieldType = context.fieldMapper(fieldName); - } - - @Override - public Query visit(Circle circle) { - throw new QueryShardException(context, "Field [" + fieldName + "] found and unknown shape Circle"); - } - - @Override - public Query visit(GeometryCollection collection) { - BooleanQuery.Builder bqb = new BooleanQuery.Builder(); - visit(bqb, collection); - return bqb.build(); - } - - private void visit(BooleanQuery.Builder bqb, GeometryCollection collection) { - for (Geometry shape : collection) { - if (shape instanceof MultiPoint) { - // Flatten multipoints - visit(bqb, (GeometryCollection) shape); - } else { - bqb.add(shape.visit(this), BooleanClause.Occur.SHOULD); - } - } - } - - @Override - public Query visit(org.elasticsearch.geo.geometry.Line line) { - validateIsGeoShapeFieldType(); - return LatLonShape.newLineQuery(fieldName(), relation.getLuceneRelation(), new Line(line.getLats(), line.getLons())); - } - - @Override - public Query visit(LinearRing ring) { - throw new QueryShardException(context, "Field [" + fieldName + "] found and unsupported shape LinearRing"); - } - - @Override - public Query visit(MultiLine multiLine) { - validateIsGeoShapeFieldType(); - Line[] lines = new Line[multiLine.size()]; - for (int i=0; i + * This method is needed to handle legacy indices and will be removed when we no longer need to build JTS shapes + */ + private static Shape buildS4J(Geometry geometry) { + return geometryToShapeBuilder(geometry).buildS4J(); + } + + + public static ShapeBuilder geometryToShapeBuilder(Geometry geometry) { + ShapeBuilder shapeBuilder = geometry.visit(new GeometryVisitor<>() { + @Override + public ShapeBuilder visit(Circle circle) { + throw new UnsupportedOperationException("circle is not supported"); + } + + @Override + public ShapeBuilder visit(GeometryCollection collection) { + GeometryCollectionBuilder shapes = new GeometryCollectionBuilder(); + for (Geometry geometry : collection) { + shapes.shape(geometry.visit(this)); + } + return shapes; + } + + @Override + public ShapeBuilder visit(org.elasticsearch.geo.geometry.Line line) { + List coordinates = new ArrayList<>(); + for (int i = 0; i < line.length(); i++) { + coordinates.add(new Coordinate(line.getLon(i), line.getLat(i), line.getAlt(i))); + } + return new LineStringBuilder(coordinates); + } + + @Override + public ShapeBuilder visit(LinearRing ring) { + throw new UnsupportedOperationException("circle is not supported"); + } + + @Override + public ShapeBuilder visit(MultiLine multiLine) { + MultiLineStringBuilder lines = new MultiLineStringBuilder(); + for (int i = 0; i < multiLine.size(); i++) { + lines.linestring((LineStringBuilder) visit(multiLine.get(i))); + } + return lines; + } + + @Override + public ShapeBuilder visit(MultiPoint multiPoint) { + List coordinates = new ArrayList<>(); + for (int i = 0; i < multiPoint.size(); i++) { + Point p = multiPoint.get(i); + coordinates.add(new Coordinate(p.getLon(), p.getLat(), p.getAlt())); + } + return new MultiPointBuilder(coordinates); + } + + @Override + public ShapeBuilder visit(MultiPolygon multiPolygon) { + MultiPolygonBuilder polygons = new MultiPolygonBuilder(); + for (int i = 0; i < multiPolygon.size(); i++) { + polygons.polygon((PolygonBuilder) visit(multiPolygon.get(i))); + } + return polygons; + } + + 
@Override + public ShapeBuilder visit(Point point) { + return new PointBuilder(point.getLon(), point.getLat()); + } + + @Override + public ShapeBuilder visit(org.elasticsearch.geo.geometry.Polygon polygon) { + PolygonBuilder polygonBuilder = + new PolygonBuilder((LineStringBuilder) visit((org.elasticsearch.geo.geometry.Line) polygon.getPolygon()), + ShapeBuilder.Orientation.RIGHT, false); + for (int i = 0; i < polygon.getNumberOfHoles(); i++) { + polygonBuilder.hole((LineStringBuilder) visit((org.elasticsearch.geo.geometry.Line) polygon.getHole(i))); + } + return polygonBuilder; + } + + @Override + public ShapeBuilder visit(Rectangle rectangle) { + return new EnvelopeBuilder(new Coordinate(rectangle.getMinLon(), rectangle.getMaxLat()), + new Coordinate(rectangle.getMaxLon(), rectangle.getMinLat())); + } + }); + return shapeBuilder; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/query/VectorGeoShapeQueryProcessor.java b/server/src/main/java/org/elasticsearch/index/query/VectorGeoShapeQueryProcessor.java new file mode 100644 index 0000000000000..1012e2ec045f3 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/query/VectorGeoShapeQueryProcessor.java @@ -0,0 +1,171 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.query; + +import org.apache.lucene.document.LatLonShape; +import org.apache.lucene.geo.Line; +import org.apache.lucene.geo.Polygon; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.Query; +import org.elasticsearch.common.geo.GeoShapeType; +import org.elasticsearch.common.geo.ShapeRelation; +import org.elasticsearch.common.geo.SpatialStrategy; +import org.elasticsearch.geo.geometry.Circle; +import org.elasticsearch.geo.geometry.Geometry; +import org.elasticsearch.geo.geometry.GeometryCollection; +import org.elasticsearch.geo.geometry.GeometryVisitor; +import org.elasticsearch.geo.geometry.LinearRing; +import org.elasticsearch.geo.geometry.MultiLine; +import org.elasticsearch.geo.geometry.MultiPoint; +import org.elasticsearch.geo.geometry.MultiPolygon; +import org.elasticsearch.geo.geometry.Point; +import org.elasticsearch.index.mapper.AbstractGeometryFieldMapper; +import org.elasticsearch.index.mapper.GeoShapeFieldMapper; +import org.elasticsearch.index.mapper.GeoShapeIndexer; +import org.elasticsearch.index.mapper.MappedFieldType; + +import static org.elasticsearch.index.mapper.GeoShapeFieldMapper.toLucenePolygon; + +public class VectorGeoShapeQueryProcessor implements AbstractGeometryFieldMapper.QueryProcessor { + + @Override + public Query process(Geometry shape, String fieldName, SpatialStrategy strategy, ShapeRelation relation, QueryShardContext context) { + // CONTAINS queries are not yet supported by VECTOR strategy + if (relation == ShapeRelation.CONTAINS) { + throw new QueryShardException(context, + ShapeRelation.CONTAINS + " query relation not supported for Field [" + fieldName + "]"); + } + // the calling query builder wraps the result in a constant score query + return getVectorQueryFromShape(shape, fieldName, relation, context); + } + + protected Query getVectorQueryFromShape(Geometry queryShape, String fieldName, ShapeRelation relation, QueryShardContext context) { + GeoShapeIndexer geometryIndexer = new GeoShapeIndexer(true); + + Geometry processedShape = geometryIndexer.prepareForIndexing(queryShape); + + if (processedShape == null) { + // the shape collapsed to nothing during preparation, so no document can match + return new MatchNoDocsQuery(); + } + // visit the prepared shape rather than the raw input so that normalization + // (for example date-line splitting) is reflected in the query + return processedShape.visit(new ShapeVisitor(context, fieldName, relation)); + } + + private class ShapeVisitor implements GeometryVisitor<Query> { + QueryShardContext context; + MappedFieldType fieldType; + String fieldName; + ShapeRelation relation; + + ShapeVisitor(QueryShardContext context, String fieldName, ShapeRelation relation) { + this.context = context; + this.fieldType = context.fieldMapper(fieldName); + this.fieldName = fieldName; + this.relation = relation; + } + + @Override + public Query visit(Circle circle) { + throw new QueryShardException(context, "Field [" + fieldName + "] found an unknown shape Circle"); + } + + @Override + public Query visit(GeometryCollection collection) { + BooleanQuery.Builder bqb = new BooleanQuery.Builder(); + visit(bqb, collection); + return bqb.build(); + } + + private void visit(BooleanQuery.Builder bqb, GeometryCollection collection) { + for (Geometry shape : collection) { + if (shape instanceof MultiPoint) { + // Flatten multipoints + visit(bqb, (GeometryCollection) shape); + } else { + bqb.add(shape.visit(this), BooleanClause.Occur.SHOULD); + } + } + } + + @Override + public Query visit(org.elasticsearch.geo.geometry.Line line) { + validateIsGeoShapeFieldType(); + return LatLonShape.newLineQuery(fieldName, 
relation.getLuceneRelation(), new Line(line.getLats(), line.getLons())); + } + + @Override + public Query visit(LinearRing ring) { + throw new QueryShardException(context, "Field [" + fieldName + "] found an unsupported shape LinearRing"); + } + + @Override + public Query visit(MultiLine multiLine) { + validateIsGeoShapeFieldType(); + Line[] lines = new Line[multiLine.size()]; + for (int i = 0; i < multiLine.size(); i++) { + lines[i] = new Line(multiLine.get(i).getLats(), multiLine.get(i).getLons()); + } + return LatLonShape.newLineQuery(fieldName, relation.getLuceneRelation(), lines); + } + + @Override + public Query visit(MultiPoint multiPoint) { + throw new QueryShardException(context, "Field [" + fieldName + "] does not support " + GeoShapeType.MULTIPOINT + + " queries"); + } + + @Override + public Query visit(MultiPolygon multiPolygon) { + Polygon[] polygons = new Polygon[multiPolygon.size()]; + for (int i = 0; i < multiPolygon.size(); i++) { + polygons[i] = toLucenePolygon(multiPolygon.get(i)); + } + return LatLonShape.newPolygonQuery(fieldName, relation.getLuceneRelation(), polygons); + } + + @Override + public Query visit(Point point) { + validateIsGeoShapeFieldType(); + // a point is expressed as a zero-extent bounding box + return LatLonShape.newBoxQuery(fieldName, relation.getLuceneRelation(), + point.getLat(), point.getLat(), point.getLon(), point.getLon()); + } + + @Override + public Query visit(org.elasticsearch.geo.geometry.Polygon polygon) { + return LatLonShape.newPolygonQuery(fieldName, relation.getLuceneRelation(), toLucenePolygon(polygon)); + } + + @Override + public Query visit(org.elasticsearch.geo.geometry.Rectangle r) { + return LatLonShape.newBoxQuery(fieldName, relation.getLuceneRelation(), + r.getMinLat(), r.getMaxLat(), r.getMinLon(), r.getMaxLon()); + } + + private void validateIsGeoShapeFieldType() { + if (fieldType instanceof GeoShapeFieldMapper.GeoShapeFieldType == false) { + throw new QueryShardException(context, "Expected " + GeoShapeFieldMapper.CONTENT_TYPE + + " field type for Field [" + fieldName + "] but found " + fieldType.typeName()); + } + } + } + +} + diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java index b167c30e32c6e..de79acd7c2317 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -31,7 +31,7 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.EngineFactory; -import org.elasticsearch.index.mapper.BaseGeoShapeFieldMapper; +import org.elasticsearch.index.mapper.AbstractGeometryFieldMapper; import org.elasticsearch.index.mapper.BinaryFieldMapper; import org.elasticsearch.index.mapper.BooleanFieldMapper; import org.elasticsearch.index.mapper.CompletionFieldMapper; @@ -39,6 +39,7 @@ import org.elasticsearch.index.mapper.FieldAliasMapper; import org.elasticsearch.index.mapper.FieldNamesFieldMapper; import org.elasticsearch.index.mapper.GeoPointFieldMapper; +import org.elasticsearch.index.mapper.GeoShapeFieldMapper; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.IgnoredFieldMapper; import org.elasticsearch.index.mapper.IndexFieldMapper; @@ -132,7 +133,7 @@ public static Map getMappers(List mappe mappers.put(CompletionFieldMapper.CONTENT_TYPE, new CompletionFieldMapper.TypeParser()); mappers.put(FieldAliasMapper.CONTENT_TYPE, new FieldAliasMapper.TypeParser()); 
mappers.put(GeoPointFieldMapper.CONTENT_TYPE, new GeoPointFieldMapper.TypeParser()); - mappers.put(BaseGeoShapeFieldMapper.CONTENT_TYPE, new BaseGeoShapeFieldMapper.TypeParser()); + mappers.put(GeoShapeFieldMapper.CONTENT_TYPE, new AbstractGeometryFieldMapper.TypeParser()); for (MapperPlugin mapperPlugin : mapperPlugins) { for (Map.Entry entry : mapperPlugin.getMappers().entrySet()) { diff --git a/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java b/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java index 9e5d7d7c6ce09..2a2f7ce75a3ab 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java +++ b/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.geo.utils.GeographyValidator; +import org.elasticsearch.index.mapper.GeoShapeIndexer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.hamcrest.ElasticsearchGeoAssertions; import org.locationtech.jts.geom.Geometry; @@ -66,7 +67,7 @@ protected void assertGeometryEquals(Object expected, XContentBuilder geoJson, bo } else { GeometryParser geometryParser = new GeometryParser(true, true, true); org.elasticsearch.geo.geometry.Geometry shape = geometryParser.parse(parser); - shape = new GeometryIndexer(true).prepareForIndexing(shape); + shape = new GeoShapeIndexer(true).prepareForIndexing(shape); ElasticsearchGeoAssertions.assertEquals(expected, shape); } } diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java index ccfc599f4cadc..1242acc635f6d 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.geo.geometry.MultiLine; import org.elasticsearch.geo.geometry.MultiPoint; import org.elasticsearch.index.mapper.ContentPath; +import org.elasticsearch.index.mapper.GeoShapeIndexer; import org.elasticsearch.index.mapper.LegacyGeoShapeFieldMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.test.VersionUtils; @@ -1425,7 +1426,7 @@ public void testParseInvalidGeometryCollectionShapes() throws IOException { public Geometry parse(XContentParser parser) throws IOException, ParseException { GeometryParser geometryParser = new GeometryParser(true, true, true); - GeometryIndexer indexer = new GeometryIndexer(true); + GeoShapeIndexer indexer = new GeoShapeIndexer(true); return indexer.prepareForIndexing(geometryParser.parse(parser)); } } diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java index d8559b3b1260e..11153f6679720 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java @@ -47,6 +47,7 @@ import org.elasticsearch.geo.geometry.MultiPoint; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.GeoShapeFieldMapper; +import org.elasticsearch.index.mapper.GeoShapeIndexer; import org.elasticsearch.index.mapper.LegacyGeoShapeFieldMapper; import org.elasticsearch.index.mapper.Mapper; import 
org.elasticsearch.test.geo.RandomShapeGenerator; @@ -470,7 +471,7 @@ public void testParseGeometryCollection() throws IOException, ParseException { } else { GeometryCollectionBuilder gcb = RandomShapeGenerator.createGeometryCollection(random()); assertExpected(gcb.buildS4J(), gcb, true); - assertExpected(new GeometryIndexer(true).prepareForIndexing(gcb.buildGeometry()), gcb, false); + assertExpected(new GeoShapeIndexer(true).prepareForIndexing(gcb.buildGeometry()), gcb, false); } } diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeometryIOTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeometryIOTests.java index 14fc710e2683c..7694d6c3d7d62 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeometryIOTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeometryIOTests.java @@ -30,7 +30,7 @@ import org.elasticsearch.test.ESTestCase; import static org.elasticsearch.geo.GeometryTestUtils.randomGeometry; -import static org.elasticsearch.index.query.GeoShapeQueryBuilder.geometryToShapeBuilder; +import static org.elasticsearch.index.query.LegacyGeoShapeQueryProcessor.geometryToShapeBuilder; public class GeometryIOTests extends ESTestCase { diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeometryIndexerTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeometryIndexerTests.java index 5ab5aaff33e05..12a3432eb36f2 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeometryIndexerTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeometryIndexerTests.java @@ -33,6 +33,7 @@ import org.elasticsearch.geo.geometry.Point; import org.elasticsearch.geo.geometry.Polygon; import org.elasticsearch.geo.utils.WellKnownText; +import org.elasticsearch.index.mapper.GeoShapeIndexer; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -42,7 +43,7 @@ public class GeometryIndexerTests extends ESTestCase { - GeometryIndexer indexer = new GeometryIndexer(true); + GeoShapeIndexer indexer = new GeoShapeIndexer(true); private static final WellKnownText WKT = new WellKnownText(true, geometry -> { }); @@ -208,13 +209,13 @@ private Geometry expected(String wkt) throws IOException, ParseException { private Geometry actual(String wkt, boolean rightOrientation) throws IOException, ParseException { Geometry shape = parseGeometry(wkt, rightOrientation); - return new GeometryIndexer(true).prepareForIndexing(shape); + return new GeoShapeIndexer(true).prepareForIndexing(shape); } private Geometry actual(XContentBuilder geoJson, boolean rightOrientation) throws IOException, ParseException { Geometry shape = parseGeometry(geoJson, rightOrientation); - return new GeometryIndexer(true).prepareForIndexing(shape); + return new GeoShapeIndexer(true).prepareForIndexing(shape); } private Geometry parseGeometry(String wkt, boolean rightOrientation) throws IOException, ParseException { diff --git a/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java index bd6c4a2da557f..40ff3e8d44bc1 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.geo.builders.PointBuilder; import org.elasticsearch.common.geo.builders.PolygonBuilder; import org.elasticsearch.common.geo.builders.ShapeBuilder; +import org.elasticsearch.index.mapper.GeoShapeIndexer; import 
org.elasticsearch.test.ESTestCase; import org.locationtech.jts.geom.Coordinate; import org.locationtech.jts.geom.LineString; @@ -778,6 +779,6 @@ public void testInvalidSelfCrossingPolygon() { } public Object buildGeometry(ShapeBuilder builder) { - return new GeometryIndexer(true).prepareForIndexing(builder.buildGeometry()); + return new GeoShapeIndexer(true).prepareForIndexing(builder.buildGeometry()); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java b/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java index c165d55e3a5f5..b744ad1609488 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ExternalMapper.java @@ -86,7 +86,7 @@ public ExternalMapper build(BuilderContext context) { BinaryFieldMapper binMapper = binBuilder.build(context); BooleanFieldMapper boolMapper = boolBuilder.build(context); GeoPointFieldMapper pointMapper = latLonPointBuilder.build(context); - BaseGeoShapeFieldMapper shapeMapper = shapeBuilder.build(context); + AbstractGeometryFieldMapper shapeMapper = shapeBuilder.build(context); FieldMapper stringMapper = (FieldMapper)stringBuilder.build(context); context.path().remove(); @@ -150,13 +150,13 @@ public Query existsQuery(QueryShardContext context) { private BinaryFieldMapper binMapper; private BooleanFieldMapper boolMapper; private GeoPointFieldMapper pointMapper; - private BaseGeoShapeFieldMapper shapeMapper; + private AbstractGeometryFieldMapper shapeMapper; private FieldMapper stringMapper; public ExternalMapper(String simpleName, MappedFieldType fieldType, String generatedValue, String mapperName, BinaryFieldMapper binMapper, BooleanFieldMapper boolMapper, GeoPointFieldMapper pointMapper, - BaseGeoShapeFieldMapper shapeMapper, FieldMapper stringMapper, Settings indexSettings, + AbstractGeometryFieldMapper shapeMapper, FieldMapper stringMapper, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) { super(simpleName, fieldType, new ExternalFieldType(), indexSettings, multiFields, copyTo); this.generatedValue = generatedValue; @@ -214,7 +214,7 @@ public FieldMapper updateFieldType(Map fullNameToFieldT BinaryFieldMapper binMapperUpdate = (BinaryFieldMapper) binMapper.updateFieldType(fullNameToFieldType); BooleanFieldMapper boolMapperUpdate = (BooleanFieldMapper) boolMapper.updateFieldType(fullNameToFieldType); GeoPointFieldMapper pointMapperUpdate = (GeoPointFieldMapper) pointMapper.updateFieldType(fullNameToFieldType); - BaseGeoShapeFieldMapper shapeMapperUpdate = (BaseGeoShapeFieldMapper) shapeMapper.updateFieldType(fullNameToFieldType); + AbstractGeometryFieldMapper shapeMapperUpdate = (AbstractGeometryFieldMapper) shapeMapper.updateFieldType(fullNameToFieldType); TextFieldMapper stringMapperUpdate = (TextFieldMapper) stringMapper.updateFieldType(fullNameToFieldType); if (update == this && multiFieldsUpdate == multiFields diff --git a/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java index a5e2d7c31afe2..65485bd5f9782 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java @@ -280,7 +280,8 @@ public void testSerializeDefaults() throws Exception { .endObject().endObject()); DocumentMapper defaultMapper = parser.parse("type1", new CompressedXContent(mapping)); String 
serialized = toXContentString((GeoShapeFieldMapper) defaultMapper.mappers().getMapper("location")); - assertTrue(serialized, serialized.contains("\"orientation\":\"" + BaseGeoShapeFieldMapper.Defaults.ORIENTATION.value() + "\"")); + assertTrue(serialized, serialized.contains("\"orientation\":\"" + + AbstractGeometryFieldMapper.Defaults.ORIENTATION.value() + "\"")); } } From 3f31859669d1c77c119005b8dfff4bcb20c44e93 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Fri, 26 Jul 2019 10:47:03 -0700 Subject: [PATCH 48/51] [DOCS] Updates terms in machine learning datafeed APIs (#44883) --- .../high-level/ml/delete-datafeed.asciidoc | 15 ++++++----- .../high-level/ml/put-datafeed.asciidoc | 27 ++++++++++--------- .../high-level/ml/start-datafeed.asciidoc | 13 +++++---- .../high-level/ml/update-datafeed.asciidoc | 24 +++++++++-------- .../apis/put-datafeed.asciidoc | 6 ++--- .../apis/start-datafeed.asciidoc | 7 ++--- .../apis/update-datafeed.asciidoc | 2 +- 7 files changed, 49 insertions(+), 45 deletions(-) diff --git a/docs/java-rest/high-level/ml/delete-datafeed.asciidoc b/docs/java-rest/high-level/ml/delete-datafeed.asciidoc index 02bfafd795187..ddc8c352729f1 100644 --- a/docs/java-rest/high-level/ml/delete-datafeed.asciidoc +++ b/docs/java-rest/high-level/ml/delete-datafeed.asciidoc @@ -4,10 +4,12 @@ :response: AcknowledgedResponse -- [id="{upid}-delete-datafeed"] -=== Delete Datafeed API +=== Delete datafeed API + +Deletes an existing datafeed. [id="{upid}-{api}-request"] -==== Delete Datafeed Request +==== Delete datafeed request A +{request}+ object requires a non-null `datafeedId` and can optionally set `force`. @@ -15,18 +17,17 @@ A +{request}+ object requires a non-null `datafeedId` and can optionally set `fo --------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request] --------------------------------------------------- -<1> Use to forcefully delete a started datafeed; -this method is quicker than stopping and deleting the datafeed. -Defaults to `false`. +<1> Use to forcefully delete a started datafeed. This method is quicker than +stopping and deleting the datafeed. Defaults to `false`. include::../execution.asciidoc[] [id="{upid}-{api}-response"] -==== Delete Datafeed Response +==== Delete datafeed response The returned +{response}+ object indicates the acknowledgement of the request: ["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- include-tagged::{doc-tests-file}[{api}-response] --------------------------------------------------- -<1> `isAcknowledged` was the deletion request acknowledged or not +<1> `isAcknowledged` was the deletion request acknowledged or not. diff --git a/docs/java-rest/high-level/ml/put-datafeed.asciidoc b/docs/java-rest/high-level/ml/put-datafeed.asciidoc index 8b2b4dd27f1bf..cce72a5ee5f18 100644 --- a/docs/java-rest/high-level/ml/put-datafeed.asciidoc +++ b/docs/java-rest/high-level/ml/put-datafeed.asciidoc @@ -4,14 +4,13 @@ :response: PutDatafeedResponse -- [id="{upid}-{api}"] -=== Put Datafeed API +=== Put datafeed API -The Put Datafeed API can be used to create a new {ml} datafeed -in the cluster. The API accepts a +{request}+ object +Creates a new {ml} datafeed in the cluster. The API accepts a +{request}+ object as a request and returns a +{response}+. 
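For orientation before the tagged snippets below, here is a minimal end-to-end
sketch. The datafeed ID, job ID, and index name are illustrative, and the
high-level REST client builder signatures may differ slightly between versions:

["source","java"]
--------------------------------------------------
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.ml.PutDatafeedRequest;
import org.elasticsearch.client.ml.PutDatafeedResponse;
import org.elasticsearch.client.ml.datafeed.DatafeedConfig;
import org.elasticsearch.common.unit.TimeValue;

// Assumes an open RestHighLevelClient named `client` and an existing
// anomaly detection job with the ID "total-requests".
DatafeedConfig datafeed = DatafeedConfig.builder("datafeed-total-requests", "total-requests")
    .setIndices("server-metrics")                 // indices to pull data from
    .setQueryDelay(TimeValue.timeValueMinutes(1)) // stay behind real time to allow for ingest delay
    .build();
PutDatafeedRequest request = new PutDatafeedRequest(datafeed);
PutDatafeedResponse response = client.machineLearning().putDatafeed(request, RequestOptions.DEFAULT);
--------------------------------------------------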
[id="{upid}-{api}-request"] -==== Put Datafeed Request +==== Put datafeed request A +{request}+ requires the following argument: @@ -19,10 +18,10 @@ A +{request}+ requires the following argument: -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request] -------------------------------------------------- -<1> The configuration of the {ml} datafeed to create +<1> The configuration of the {ml} datafeed to create. [id="{upid}-{api}-config"] -==== Datafeed Configuration +==== Datafeed configuration The `DatafeedConfig` object contains all the details about the {ml} datafeed configuration. @@ -33,10 +32,10 @@ A `DatafeedConfig` requires the following arguments: -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-config] -------------------------------------------------- -<1> The datafeed ID and the job ID -<2> The indices that contain the data to retrieve and feed into the job +<1> The datafeed ID and the {anomaly-job} ID. +<2> The indices that contain the data to retrieve and feed into the {anomaly-job}. -==== Optional Arguments +==== Optional arguments The following arguments are optional: ["source","java",subs="attributes,callouts,macros"] @@ -49,7 +48,8 @@ include-tagged::{doc-tests-file}[{api}-config-set-chunking-config] -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-config-set-frequency] -------------------------------------------------- -<1> The interval at which scheduled queries are made while the datafeed runs in real time. +<1> The interval at which scheduled queries are made while the datafeed runs in +real time. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- @@ -72,8 +72,9 @@ The window must be larger than the Job's bucket size, but smaller than 24 hours, and span less than 10,000 buckets. Defaults to `null`, which causes an appropriate window span to be calculated when the datafeed runs. -The default `check_window` span calculation is the max between `2h` or `8 * bucket_span`. -To explicitly disable, pass `DelayedDataCheckConfig.disabledDelayedDataCheckConfig()`. +The default `check_window` span calculation is the max between `2h` or +`8 * bucket_span`. To explicitly disable, pass +`DelayedDataCheckConfig.disabledDelayedDataCheckConfig()`. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- @@ -101,4 +102,4 @@ default values: -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-response] -------------------------------------------------- -<1> The created datafeed +<1> The created datafeed. diff --git a/docs/java-rest/high-level/ml/start-datafeed.asciidoc b/docs/java-rest/high-level/ml/start-datafeed.asciidoc index 9c3b096634d81..821b404b0a529 100644 --- a/docs/java-rest/high-level/ml/start-datafeed.asciidoc +++ b/docs/java-rest/high-level/ml/start-datafeed.asciidoc @@ -4,14 +4,13 @@ :response: StartDatafeedResponse -- [id="{upid}-{api}"] -=== Start Datafeed API +=== Start datafeed API -The Start Datafeed API provides the ability to start a {ml} datafeed in the cluster. -It accepts a +{request}+ object and responds -with a +{response}+ object. +Starts a {ml} datafeed in the cluster. It accepts a +{request}+ object and +responds with a +{response}+ object. [id="{upid}-{api}-request"] -==== Start Datafeed Request +==== Start datafeed request A +{request}+ object is created referencing a non-null `datafeedId`. 
All other fields are optional for the request. @@ -20,9 +19,9 @@ All other fields are optional for the request. -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request] -------------------------------------------------- -<1> Constructing a new request referencing an existing `datafeedId` +<1> Constructing a new request referencing an existing `datafeedId`. -==== Optional Arguments +==== Optional arguments The following arguments are optional. diff --git a/docs/java-rest/high-level/ml/update-datafeed.asciidoc b/docs/java-rest/high-level/ml/update-datafeed.asciidoc index 0073bfb7bce6e..c27efdb7d186f 100644 --- a/docs/java-rest/high-level/ml/update-datafeed.asciidoc +++ b/docs/java-rest/high-level/ml/update-datafeed.asciidoc @@ -4,14 +4,13 @@ :response: PutDatafeedResponse -- [id="{upid}-{api}"] -=== Update Datafeed API +=== Update datafeed API -The Update Datafeed API can be used to update a {ml} datafeed -in the cluster. The API accepts a +{request}+ object +Updates a {ml} datafeed in the cluster. The API accepts a +{request}+ object as a request and returns a +{response}+. [id="{upid}-{api}-request"] -==== Update Datafeed Request +==== Update datafeed request A +{request}+ requires the following argument: @@ -22,7 +21,7 @@ include-tagged::{doc-tests-file}[{api}-request] <1> The updated configuration of the {ml} datafeed [id="{upid}-{api}-config"] -==== Updated Datafeed Arguments +==== Updated datafeed arguments A `DatafeedUpdate` requires an existing non-null `datafeedId` and allows updating various settings. @@ -31,12 +30,15 @@ allows updating various settings. -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-config] -------------------------------------------------- -<1> Mandatory, non-null `datafeedId` referencing an existing {ml} datafeed -<2> Optional, set the datafeed Aggregations for data gathering -<3> Optional, the indices that contain the data to retrieve and feed into the job +<1> Mandatory, non-null `datafeedId` referencing an existing {ml} datafeed. +<2> Optional, set the datafeed aggregations for data gathering. +<3> Optional, the indices that contain the data to retrieve and feed into the +{anomaly-job}. <4> Optional, specifies how data searches are split into time chunks. -<5> Optional, the interval at which scheduled queries are made while the datafeed runs in real time. -<6> Optional, a query to filter the search results by. Defaults to the `match_all` query. +<5> Optional, the interval at which scheduled queries are made while the +datafeed runs in real time. +<6> Optional, a query to filter the search results by. Defaults to the +`match_all` query. <7> Optional, the time interval behind real time that data is queried. <8> Optional, allows the use of script fields. <9> Optional, the `size` parameter used in the searches. @@ -53,4 +55,4 @@ the updated {ml} datafeed if it has been successfully updated. -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-response] -------------------------------------------------- -<1> The updated datafeed +<1> The updated datafeed. diff --git a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc index fa99d311ff015..933cd8de0ec2e 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc @@ -18,7 +18,7 @@ Instantiates a {dfeed}. 
[[ml-put-datafeed-prereqs]] ==== {api-prereq-title} -* You must create a job before you create a {dfeed}. +* You must create an {anomaly-job} before you create a {dfeed}. * If {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See {stack-ov}/security-privileges.html[Security privileges]. @@ -26,7 +26,7 @@ cluster privileges to use this API. See [[ml-put-datafeed-desc]] ==== {api-description-title} -You can associate only one {dfeed} to each job. +You can associate only one {dfeed} to each {anomaly-job}. [IMPORTANT] ==== @@ -75,7 +75,7 @@ those same roles. `job_id`:: (Required, string) A numerical character string that uniquely identifies the - job. + {anomaly-job}. `query`:: (Optional, object) The {es} query domain-specific language (DSL). This value diff --git a/docs/reference/ml/anomaly-detection/apis/start-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/start-datafeed.asciidoc index ceaa95585620d..1d2cfb21e1ced 100644 --- a/docs/reference/ml/anomaly-detection/apis/start-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/start-datafeed.asciidoc @@ -18,8 +18,8 @@ Starts one or more {dfeeds}. [[ml-start-datafeed-prereqs]] ==== {api-prereq-title} -* Before you can start a {dfeed}, the job must be open. Otherwise, an error -occurs. +* Before you can start a {dfeed}, the {anomaly-job} must be open. Otherwise, an +error occurs. * If {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See {stack-ov}/security-privileges.html[Security privileges]. @@ -36,7 +36,8 @@ If you want to analyze from the beginning of a dataset, you can specify any date earlier than that beginning date. If you do not specify a start time and the {dfeed} is associated with a new -job, the analysis starts from the earliest time for which data is available. +{anomaly-job}, the analysis starts from the earliest time for which data is +available. When you start a {dfeed}, you can also specify an end time. If you do so, the job analyzes data from the start time until the end time, at which point the diff --git a/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc index f6b835cda2814..942184bda32e0 100644 --- a/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc @@ -67,7 +67,7 @@ The following properties can be updated after the {dfeed} is created: `job_id`:: (Optional, string) A numerical character string that uniquely identifies the - job. + {anomaly-job}. `query`:: (Optional, object) The {es} query domain-specific language (DSL). 
This value From 75999ff83c3e83c3a2bd7a32f9d288e99d223675 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Fri, 26 Jul 2019 11:07:01 -0700 Subject: [PATCH 49/51] [DOCS] Updates anomaly detection terminology (#44888) --- .../anomaly-detection/aggregations.asciidoc | 6 +- .../ml/anomaly-detection/categories.asciidoc | 6 +- .../ml/anomaly-detection/configuring.asciidoc | 4 +- .../ml/anomaly-detection/customurl.asciidoc | 23 +++--- .../delayed-data-detection.asciidoc | 14 ++-- .../detector-custom-rules.asciidoc | 41 +++++----- .../ml/anomaly-detection/functions.asciidoc | 12 +-- .../functions/count.asciidoc | 37 ++++----- .../anomaly-detection/functions/geo.asciidoc | 16 ++-- .../anomaly-detection/functions/info.asciidoc | 16 ++-- .../functions/metric.asciidoc | 76 ++++++++++--------- .../anomaly-detection/functions/rare.asciidoc | 37 ++++----- .../anomaly-detection/functions/sum.asciidoc | 17 ++--- .../anomaly-detection/functions/time.asciidoc | 43 ++++++----- .../ml/anomaly-detection/populations.asciidoc | 6 +- .../ml/anomaly-detection/stopping-ml.asciidoc | 34 +++++---- .../ml/anomaly-detection/transforms.asciidoc | 26 ++++--- 17 files changed, 213 insertions(+), 201 deletions(-) diff --git a/docs/reference/ml/anomaly-detection/aggregations.asciidoc b/docs/reference/ml/anomaly-detection/aggregations.asciidoc index 1fad9f1b2bb29..fecf99df53e50 100644 --- a/docs/reference/ml/anomaly-detection/aggregations.asciidoc +++ b/docs/reference/ml/anomaly-detection/aggregations.asciidoc @@ -4,7 +4,7 @@ By default, {dfeeds} fetch data from {es} using search and scroll requests. It can be significantly more efficient, however, to aggregate data in {es} -and to configure your jobs to analyze aggregated data. +and to configure your {anomaly-jobs} to analyze aggregated data. One of the benefits of aggregating data this way is that {es} automatically distributes these calculations across your cluster. You can then feed this @@ -19,8 +19,8 @@ of the last record in the bucket. If you use a terms aggregation and the cardinality of a term is high, then the aggregation might not be effective and you might want to just use the default search and scroll behavior. -When you create or update a job, you can include the names of aggregations, for -example: +When you create or update an {anomaly-job}, you can include the names of +aggregations, for example: [source,js] ---------------------------------- diff --git a/docs/reference/ml/anomaly-detection/categories.asciidoc b/docs/reference/ml/anomaly-detection/categories.asciidoc index a75bf2a2eed75..aa7fd8a450239 100644 --- a/docs/reference/ml/anomaly-detection/categories.asciidoc +++ b/docs/reference/ml/anomaly-detection/categories.asciidoc @@ -68,8 +68,8 @@ we do not want the detailed SQL to be considered in the message categorization. This particular categorization filter removes the SQL statement from the categorization algorithm. -If your data is stored in {es}, you can create an advanced job with these same -properties: +If your data is stored in {es}, you can create an advanced {anomaly-job} with +these same properties: [role="screenshot"] image::images/ml-category-advanced.jpg["Advanced job configuration options related to categorization"] @@ -209,7 +209,7 @@ letters in tokens whereas the `ml_classic` tokenizer does, although that could be fixed by using more complex regular expressions. For more information about the `categorization_analyzer` property, see -{ref}/ml-job-resource.html#ml-categorizationanalyzer[Categorization Analyzer]. 
+{ref}/ml-job-resource.html#ml-categorizationanalyzer[Categorization analyzer]. NOTE: To add the `categorization_analyzer` property in {kib}, you must use the **Edit JSON** tab and copy the `categorization_analyzer` object from one of the diff --git a/docs/reference/ml/anomaly-detection/configuring.asciidoc b/docs/reference/ml/anomaly-detection/configuring.asciidoc index a1a2f477d8134..759c0e2153562 100644 --- a/docs/reference/ml/anomaly-detection/configuring.asciidoc +++ b/docs/reference/ml/anomaly-detection/configuring.asciidoc @@ -7,8 +7,8 @@ your cluster and all master-eligible nodes must have {ml} enabled. By default, all nodes are {ml} nodes. For more information about these settings, see {ref}/modules-node.html#ml-node[{ml} nodes]. -To use the {ml-features} to analyze your data, you must create a job and -send your data to that job. +To use the {ml-features} to analyze your data, you can create an {anomaly-job} +and send your data to that job. * If your data is stored in {es}: diff --git a/docs/reference/ml/anomaly-detection/customurl.asciidoc b/docs/reference/ml/anomaly-detection/customurl.asciidoc index bb88d96ed5013..89a5f5d8ce894 100644 --- a/docs/reference/ml/anomaly-detection/customurl.asciidoc +++ b/docs/reference/ml/anomaly-detection/customurl.asciidoc @@ -2,17 +2,17 @@ [[ml-configuring-url]] === Adding custom URLs to machine learning results -When you create an advanced job or edit any job in {kib}, you can optionally -attach one or more custom URLs. +When you create an advanced {anomaly-job} or edit any {anomaly-jobs} in {kib}, +you can optionally attach one or more custom URLs. The custom URLs provide links from the anomalies table in the *Anomaly Explorer* or *Single Metric Viewer* window in {kib} to {kib} dashboards, the *Discovery* page, or external websites. For example, you can define a custom URL that provides a way for users to drill down to the source data from the results set. -When you edit a job in {kib}, it simplifies the creation of the custom URLs for -{kib} dashboards and the *Discover* page and it enables you to test your URLs. -For example: +When you edit an {anomaly-job} in {kib}, it simplifies the creation of the +custom URLs for {kib} dashboards and the *Discover* page and it enables you to +test your URLs. For example: [role="screenshot"] image::images/ml-customurl-edit.jpg["Edit a job to add a custom URL"] @@ -29,7 +29,8 @@ As in this case, the custom URL can contain are populated when you click the link in the anomalies table. In this example, the custom URL contains `$earliest$`, `$latest$`, and `$service$` tokens, which pass the beginning and end of the time span of the selected anomaly and the -pertinent `service` field value to the target page. If you were interested in the following anomaly, for example: +pertinent `service` field value to the target page. If you were interested in +the following anomaly, for example: [role="screenshot"] image::images/ml-customurl.jpg["An example of the custom URL links in the Anomaly Explorer anomalies table"] @@ -43,8 +44,8 @@ image::images/ml-customurl-discover.jpg["An example of the results on the Discov Since we specified a time range of 2 hours, the time filter restricts the results to the time period two hours before and after the anomaly. -You can also specify these custom URL settings when you create or update jobs by -using the {ml} APIs. +You can also specify these custom URL settings when you create or update +{anomaly-jobs} by using the APIs. 
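For reference, a rough sketch of attaching a custom URL through the high-level
REST client. The job ID and URL value are illustrative, and the `custom_urls`
entries are built as plain maps here because the client models
`custom_settings` as an arbitrary map:

["source","java"]
--------------------------------------------------
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.elasticsearch.client.ml.job.config.Job;

// One custom URL entry with token placeholders that {kib} resolves per anomaly.
Map<String, Object> customUrl = new HashMap<>();
customUrl.put("url_name", "Raw data");
customUrl.put("time_range", "2h");
customUrl.put("url_value",
    "discover#/?_g=(time:(from:'$earliest$',mode:absolute,to:'$latest$'))");

Job.Builder jobBuilder = new Job.Builder("sample-job");
jobBuilder.setCustomSettings(Collections.singletonMap("custom_urls",
    Collections.singletonList(customUrl)));
--------------------------------------------------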
[float] [[ml-configuring-url-strings]] @@ -74,9 +75,9 @@ time as the earliest and latest times. The same is also true if the interval is set to `Auto` and a one hour interval was chosen. You can override this behavior by using the `time_range` setting. -The `$mlcategoryregex$` and `$mlcategoryterms$` tokens pertain to jobs where you -are categorizing field values. For more information about this type of analysis, -see <>. +The `$mlcategoryregex$` and `$mlcategoryterms$` tokens pertain to {anomaly-jobs} +where you are categorizing field values. For more information about this type of +analysis, see <>. The `$mlcategoryregex$` token passes the regular expression value of the category of the selected anomaly, as identified by the value of the `mlcategory` diff --git a/docs/reference/ml/anomaly-detection/delayed-data-detection.asciidoc b/docs/reference/ml/anomaly-detection/delayed-data-detection.asciidoc index 872a45d724893..625f839a86834 100644 --- a/docs/reference/ml/anomaly-detection/delayed-data-detection.asciidoc +++ b/docs/reference/ml/anomaly-detection/delayed-data-detection.asciidoc @@ -22,8 +22,8 @@ functions are not really affected. In these situations, it all comes out okay in the end as the delayed data is distributed randomly. An example would be a `mean` metric for a field in a large collection of data. In this case, checking for delayed data may not provide much benefit. If data are consistently delayed, -however, jobs with a `low_count` function may provide false positives. In this -situation, it would be useful to see if data comes in after an anomaly is +however, {anomaly-jobs} with a `low_count` function may provide false positives. +In this situation, it would be useful to see if data comes in after an anomaly is recorded so that you can determine a next course of action. ==== How do we detect delayed data? @@ -35,11 +35,11 @@ Every 15 minutes or every `check_window`, whichever is smaller, the datafeed triggers a document search over the configured indices. This search looks over a time span with a length of `check_window` ending with the latest finalized bucket. That time span is partitioned into buckets, whose length equals the bucket span -of the associated job. The `doc_count` of those buckets are then compared with -the job's finalized analysis buckets to see whether any data has arrived since -the analysis. If there is indeed missing data due to their ingest delay, the end -user is notified. For example, you can see annotations in {kib} for the periods -where these delays occur. +of the associated {anomaly-job}. The `doc_count` of those buckets are then +compared with the job's finalized analysis buckets to see whether any data has +arrived since the analysis. If there is indeed missing data due to their ingest +delay, the end user is notified. For example, you can see annotations in {kib} +for the periods where these delays occur. ==== What to do about delayed data? diff --git a/docs/reference/ml/anomaly-detection/detector-custom-rules.asciidoc b/docs/reference/ml/anomaly-detection/detector-custom-rules.asciidoc index ac0af6217394e..fc00d11cea64f 100644 --- a/docs/reference/ml/anomaly-detection/detector-custom-rules.asciidoc +++ b/docs/reference/ml/anomaly-detection/detector-custom-rules.asciidoc @@ -16,17 +16,18 @@ Let us see how those can be configured by examples. ==== Specifying custom rule scope -Let us assume we are configuring a job in order to detect DNS data exfiltration. -Our data contain fields "subdomain" and "highest_registered_domain". 
-We can use a detector that looks like `high_info_content(subdomain) over highest_registered_domain`. -If we run such a job it is possible that we discover a lot of anomalies on -frequently used domains that we have reasons to trust. As security analysts, we -are not interested in such anomalies. Ideally, we could instruct the detector to -skip results for domains that we consider safe. Using a rule with a scope allows -us to achieve this. +Let us assume we are configuring an {anomaly-job} in order to detect DNS data +exfiltration. Our data contain fields "subdomain" and "highest_registered_domain". +We can use a detector that looks like +`high_info_content(subdomain) over highest_registered_domain`. If we run such a +job, it is possible that we discover a lot of anomalies on frequently used +domains that we have reasons to trust. As security analysts, we are not +interested in such anomalies. Ideally, we could instruct the detector to skip +results for domains that we consider safe. Using a rule with a scope allows us +to achieve this. First, we need to create a list of our safe domains. Those lists are called -_filters_ in {ml}. Filters can be shared across jobs. +_filters_ in {ml}. Filters can be shared across {anomaly-jobs}. We create our filter using the {ref}/ml-put-filter.html[put filter API]: @@ -41,8 +42,8 @@ PUT _ml/filters/safe_domains // CONSOLE // TEST[skip:needs-licence] -Now, we can create our job specifying a scope that uses the `safe_domains` -filter for the `highest_registered_domain` field: +Now, we can create our {anomaly-job} specifying a scope that uses the +`safe_domains` filter for the `highest_registered_domain` field: [source,js] ---------------------------------- @@ -139,8 +140,8 @@ example, 0.02. Given our knowledge about how CPU utilization behaves we might determine that anomalies with such small actual values are not interesting for investigation. -Let us now configure a job with a rule that will skip results where CPU -utilization is less than 0.20. +Let us now configure an {anomaly-job} with a rule that will skip results where +CPU utilization is less than 0.20. [source,js] ---------------------------------- @@ -214,18 +215,18 @@ PUT _ml/anomaly_detectors/rule_with_range ==== Custom rules in the life-cycle of a job Custom rules only affect results created after the rules were applied. -Let us imagine that we have configured a job and it has been running +Let us imagine that we have configured an {anomaly-job} and it has been running for some time. After observing its results we decide that we can employ rules in order to get rid of some uninteresting results. We can use -the {ref}/ml-update-job.html[update job API] to do so. However, the rule we -added will only be in effect for any results created from the moment we added -the rule onwards. Past results will remain unaffected. +the {ref}/ml-update-job.html[update {anomaly-job} API] to do so. However, the +rule we added will only be in effect for any results created from the moment we +added the rule onwards. Past results will remain unaffected. -==== Using custom rules VS filtering data +==== Using custom rules vs. filtering data It might appear like using rules is just another way of filtering the data -that feeds into a job. For example, a rule that skips results when the -partition field value is in a filter sounds equivalent to having a query +that feeds into an {anomaly-job}. 
For example, a rule that skips results when +the partition field value is in a filter sounds equivalent to having a query that filters out such documents. But it is not. There is a fundamental difference. When the data is filtered before reaching a job it is as if they never existed for the job. With rules, the data still reaches the job and diff --git a/docs/reference/ml/anomaly-detection/functions.asciidoc b/docs/reference/ml/anomaly-detection/functions.asciidoc index 54a648635876d..d821a3ff4c023 100644 --- a/docs/reference/ml/anomaly-detection/functions.asciidoc +++ b/docs/reference/ml/anomaly-detection/functions.asciidoc @@ -5,10 +5,10 @@ The {ml-features} include analysis functions that provide a wide variety of flexible ways to analyze data for anomalies. -When you create jobs, you specify one or more detectors, which define the type of -analysis that needs to be done. If you are creating your job by using {ml} APIs, -you specify the functions in -{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. +When you create {anomaly-jobs}, you specify one or more detectors, which define +the type of analysis that needs to be done. If you are creating your job by +using {ml} APIs, you specify the functions in +{ref}/ml-job-resource.html#ml-detectorconfig[Detector configuration objects]. If you are creating your job in {kib}, you specify the functions differently depending on whether you are creating single metric, multi-metric, or advanced jobs. @@ -24,8 +24,8 @@ You can specify a `summary_count_field_name` with any function except `metric`. When you use `summary_count_field_name`, the {ml} features expect the input data to be pre-aggregated. The value of the `summary_count_field_name` field must contain the count of raw events that were summarized. In {kib}, use the -**summary_count_field_name** in advanced jobs. Analyzing aggregated input data -provides a significant boost in performance. For more information, see +**summary_count_field_name** in advanced {anomaly-jobs}. Analyzing aggregated +input data provides a significant boost in performance. For more information, see <>. If your data is sparse, there may be gaps in the data which means you might have diff --git a/docs/reference/ml/anomaly-detection/functions/count.asciidoc b/docs/reference/ml/anomaly-detection/functions/count.asciidoc index 404ed7f2d94a3..02a9cd2b08e52 100644 --- a/docs/reference/ml/anomaly-detection/functions/count.asciidoc +++ b/docs/reference/ml/anomaly-detection/functions/count.asciidoc @@ -40,7 +40,7 @@ These functions support the following properties: * `partition_field_name` (optional) For more information about those properties, -see {ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. +see {ref}/ml-job-resource.html#ml-detectorconfig[Detector configuration objects]. .Example 1: Analyzing events with the count function [source,js] @@ -65,8 +65,9 @@ This example is probably the simplest possible analysis. It identifies time buckets during which the overall count of events is higher or lower than usual. -When you use this function in a detector in your job, it models the event rate -and detects when the event rate is unusual compared to its past behavior. +When you use this function in a detector in your {anomaly-job}, it models the +event rate and detects when the event rate is unusual compared to its past +behavior. 
.Example 2: Analyzing errors with the high_count function [source,js] @@ -89,7 +90,7 @@ PUT _ml/anomaly_detectors/example2 // CONSOLE // TEST[skip:needs-licence] -If you use this `high_count` function in a detector in your job, it +If you use this `high_count` function in a detector in your {anomaly-job}, it models the event rate for each error code. It detects users that generate an unusually high count of error codes compared to other users. @@ -117,9 +118,9 @@ PUT _ml/anomaly_detectors/example3 In this example, the function detects when the count of events for a status code is lower than usual. -When you use this function in a detector in your job, it models the event rate -for each status code and detects when a status code has an unusually low count -compared to its past behavior. +When you use this function in a detector in your {anomaly-job}, it models the +event rate for each status code and detects when a status code has an unusually +low count compared to its past behavior. .Example 4: Analyzing aggregated data with the count function [source,js] @@ -168,7 +169,7 @@ These functions support the following properties: * `partition_field_name` (optional) For more information about those properties, -see {ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. +see {ref}/ml-job-resource.html#ml-detectorconfig[Detector configuration objects]. For example, if you have the following number of events per bucket: @@ -206,10 +207,10 @@ PUT _ml/anomaly_detectors/example5 // CONSOLE // TEST[skip:needs-licence] -If you use this `high_non_zero_count` function in a detector in your job, it -models the count of events for the `signaturename` field. It ignores any buckets -where the count is zero and detects when a `signaturename` value has an -unusually high count of events compared to its past behavior. +If you use this `high_non_zero_count` function in a detector in your +{anomaly-job}, it models the count of events for the `signaturename` field. It +ignores any buckets where the count is zero and detects when a `signaturename` +value has an unusually high count of events compared to its past behavior. NOTE: Population analysis (using an `over_field_name` property value) is not supported for the `non_zero_count`, `high_non_zero_count`, and @@ -238,7 +239,7 @@ These functions support the following properties: * `partition_field_name` (optional) For more information about those properties, -see {ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. +see {ref}/ml-job-resource.html#ml-detectorconfig[Detector configuration objects]. .Example 6: Analyzing users with the distinct_count function [source,js] @@ -261,9 +262,9 @@ PUT _ml/anomaly_detectors/example6 // TEST[skip:needs-licence] This `distinct_count` function detects when a system has an unusual number -of logged in users. When you use this function in a detector in your job, it -models the distinct count of users. It also detects when the distinct number of -users is unusual compared to the past. +of logged in users. When you use this function in a detector in your +{anomaly-job}, it models the distinct count of users. It also detects when the +distinct number of users is unusual compared to the past. .Example 7: Analyzing ports with the high_distinct_count function [source,js] @@ -287,6 +288,6 @@ PUT _ml/anomaly_detectors/example7 // TEST[skip:needs-licence] This example detects instances of port scanning. 
When you use this function in a -detector in your job, it models the distinct count of ports. It also detects the -`src_ip` values that connect to an unusually high number of different +detector in your {anomaly-job}, it models the distinct count of ports. It also +detects the `src_ip` values that connect to an unusually high number of different `dst_ports` values compared to other `src_ip` values. diff --git a/docs/reference/ml/anomaly-detection/functions/geo.asciidoc b/docs/reference/ml/anomaly-detection/functions/geo.asciidoc index ee666c83094f0..7469bb963d418 100644 --- a/docs/reference/ml/anomaly-detection/functions/geo.asciidoc +++ b/docs/reference/ml/anomaly-detection/functions/geo.asciidoc @@ -7,9 +7,9 @@ input data. The {ml-features} include the following geographic function: `lat_long`. -NOTE: You cannot create forecasts for jobs that contain geographic functions. -You also cannot add rules with conditions to detectors that use geographic -functions. +NOTE: You cannot create forecasts for {anomaly-jobs} that contain geographic +functions. You also cannot add rules with conditions to detectors that use +geographic functions. [float] [[ml-lat-long]] @@ -26,7 +26,7 @@ This function supports the following properties: * `partition_field_name` (optional) For more information about those properties, -see {ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. +see {ref}/ml-job-resource.html#ml-detectorconfig[Detector configuration objects]. .Example 1: Analyzing transactions with the lat_long function [source,js] @@ -49,15 +49,15 @@ PUT _ml/anomaly_detectors/example1 // CONSOLE // TEST[skip:needs-licence] -If you use this `lat_long` function in a detector in your job, it +If you use this `lat_long` function in a detector in your {anomaly-job}, it detects anomalies where the geographic location of a credit card transaction is unusual for a particular customer’s credit card. An anomaly might indicate fraud. IMPORTANT: The `field_name` that you supply must be a single string that contains two comma-separated numbers of the form `latitude,longitude`, a `geo_point` field, a `geo_shape` field that contains point values, or a `geo_centroid` aggregation. -The `latitude` and `longitude` must be in the range -180 to 180 and represent a point on the -surface of the Earth. +The `latitude` and `longitude` must be in the range -180 to 180 and represent a +point on the surface of the Earth. For example, JSON data might contain the following transaction coordinates: @@ -75,6 +75,6 @@ In {es}, location data is likely to be stored in `geo_point` fields. For more information, see {ref}/geo-point.html[Geo-point datatype]. This data type is supported natively in {ml-features}. Specifically, {dfeed} when pulling data from a `geo_point` field, will transform the data into the appropriate `lat,lon` string -format before sending to the {ml} job. +format before sending to the {anomaly-job}. For more information, see <>. diff --git a/docs/reference/ml/anomaly-detection/functions/info.asciidoc b/docs/reference/ml/anomaly-detection/functions/info.asciidoc index c75440f238ff5..18eb6d9f4e987 100644 --- a/docs/reference/ml/anomaly-detection/functions/info.asciidoc +++ b/docs/reference/ml/anomaly-detection/functions/info.asciidoc @@ -29,7 +29,7 @@ These functions support the following properties: * `partition_field_name` (optional) For more information about those properties, see -{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. 
+{ref}/ml-job-resource.html#ml-detectorconfig[Detector configuration objects]. .Example 1: Analyzing subdomain strings with the info_content function [source,js] @@ -42,9 +42,9 @@ For more information about those properties, see -------------------------------------------------- // NOTCONSOLE -If you use this `info_content` function in a detector in your job, it models -information that is present in the `subdomain` string. It detects anomalies -where the information content is unusual compared to the other +If you use this `info_content` function in a detector in your {anomaly-job}, it +models information that is present in the `subdomain` string. It detects +anomalies where the information content is unusual compared to the other `highest_registered_domain` values. An anomaly could indicate an abuse of the DNS protocol, such as malicious command and control activity. @@ -63,8 +63,8 @@ choice. -------------------------------------------------- // NOTCONSOLE -If you use this `high_info_content` function in a detector in your job, it -models information content that is held in the DNS query string. It detects +If you use this `high_info_content` function in a detector in your {anomaly-job}, +it models information content that is held in the DNS query string. It detects `src_ip` values where the information content is unusually high compared to other `src_ip` values. This example is similar to the example for the `info_content` function, but it reports anomalies only where the amount of @@ -81,8 +81,8 @@ information content is higher than expected. -------------------------------------------------- // NOTCONSOLE -If you use this `low_info_content` function in a detector in your job, it models -information content that is present in the message string for each +If you use this `low_info_content` function in a detector in your {anomaly-job}, +it models information content that is present in the message string for each `logfilename`. It detects anomalies where the information content is low compared to its past behavior. For example, this function detects unusually low amounts of information in a collection of rolling log files. Low information diff --git a/docs/reference/ml/anomaly-detection/functions/metric.asciidoc b/docs/reference/ml/anomaly-detection/functions/metric.asciidoc index 7868d4b780a40..cb44b61849a22 100644 --- a/docs/reference/ml/anomaly-detection/functions/metric.asciidoc +++ b/docs/reference/ml/anomaly-detection/functions/metric.asciidoc @@ -35,7 +35,7 @@ This function supports the following properties: * `partition_field_name` (optional) For more information about those properties, see -{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. +{ref}/ml-job-resource.html#ml-detectorconfig[Detector configuration objects]. .Example 1: Analyzing minimum transactions with the min function [source,js] @@ -48,9 +48,9 @@ For more information about those properties, see -------------------------------------------------- // NOTCONSOLE -If you use this `min` function in a detector in your job, it detects where the -smallest transaction is lower than previously observed. You can use this -function to detect items for sale at unintentionally low prices due to data +If you use this `min` function in a detector in your {anomaly-job}, it detects +where the smallest transaction is lower than previously observed. You can use +this function to detect items for sale at unintentionally low prices due to data entry mistakes. 
It models the minimum amount for each product over time. [float] @@ -70,7 +70,7 @@ This function supports the following properties: * `partition_field_name` (optional) For more information about those properties, see -{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. +{ref}/ml-job-resource.html#ml-detectorconfig[Detector configuration objects]. .Example 2: Analyzing maximum response times with the max function [source,js] @@ -83,9 +83,9 @@ For more information about those properties, see -------------------------------------------------- // NOTCONSOLE -If you use this `max` function in a detector in your job, it detects where the -longest `responsetime` is longer than previously observed. You can use this -function to detect applications that have `responsetime` values that are +If you use this `max` function in a detector in your {anomaly-job}, it detects +where the longest `responsetime` is longer than previously observed. You can use +this function to detect applications that have `responsetime` values that are unusually lengthy. It models the maximum `responsetime` for each application over time and detects when the longest `responsetime` is unusually long compared to previous applications. @@ -132,7 +132,7 @@ These functions support the following properties: * `partition_field_name` (optional) For more information about those properties, see -{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. +{ref}/ml-job-resource.html#ml-detectorconfig[Detector configuration objects]. .Example 4: Analyzing response times with the median function [source,js] @@ -145,9 +145,9 @@ For more information about those properties, see -------------------------------------------------- // NOTCONSOLE -If you use this `median` function in a detector in your job, it models the -median `responsetime` for each application over time. It detects when the median -`responsetime` is unusual compared to previous `responsetime` values. +If you use this `median` function in a detector in your {anomaly-job}, it models +the median `responsetime` for each application over time. It detects when the +median `responsetime` is unusual compared to previous `responsetime` values. [float] [[ml-metric-mean]] @@ -170,7 +170,7 @@ These functions support the following properties: * `partition_field_name` (optional) For more information about those properties, see -{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. +{ref}/ml-job-resource.html#ml-detectorconfig[Detector configuration objects]. .Example 5: Analyzing response times with the mean function [source,js] @@ -183,8 +183,8 @@ For more information about those properties, see -------------------------------------------------- // NOTCONSOLE -If you use this `mean` function in a detector in your job, it models the mean -`responsetime` for each application over time. It detects when the mean +If you use this `mean` function in a detector in your {anomaly-job}, it models +the mean `responsetime` for each application over time. It detects when the mean `responsetime` is unusual compared to previous `responsetime` values. .Example 6: Analyzing response times with the high_mean function @@ -198,9 +198,10 @@ If you use this `mean` function in a detector in your job, it models the mean -------------------------------------------------- // NOTCONSOLE -If you use this `high_mean` function in a detector in your job, it models the -mean `responsetime` for each application over time. 
It detects when the mean -`responsetime` is unusually high compared to previous `responsetime` values. +If you use this `high_mean` function in a detector in your {anomaly-job}, it +models the mean `responsetime` for each application over time. It detects when +the mean `responsetime` is unusually high compared to previous `responsetime` +values. .Example 7: Analyzing response times with the low_mean function [source,js] @@ -213,9 +214,10 @@ mean `responsetime` for each application over time. It detects when the mean -------------------------------------------------- // NOTCONSOLE -If you use this `low_mean` function in a detector in your job, it models the -mean `responsetime` for each application over time. It detects when the mean -`responsetime` is unusually low compared to previous `responsetime` values. +If you use this `low_mean` function in a detector in your {anomaly-job}, it +models the mean `responsetime` for each application over time. It detects when +the mean `responsetime` is unusually low compared to previous `responsetime` +values. [float] [[ml-metric-metric]] @@ -236,7 +238,7 @@ This function supports the following properties: * `partition_field_name` (optional) For more information about those properties, see -{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. +{ref}/ml-job-resource.html#ml-detectorconfig[Detector configuration objects]. .Example 8: Analyzing response times with the metric function [source,js] @@ -249,8 +251,8 @@ For more information about those properties, see -------------------------------------------------- // NOTCONSOLE -If you use this `metric` function in a detector in your job, it models the -mean, min, and max `responsetime` for each application over time. It detects +If you use this `metric` function in a detector in your {anomaly-job}, it models +the mean, min, and max `responsetime` for each application over time. It detects when the mean, min, or max `responsetime` is unusual compared to previous `responsetime` values. @@ -273,7 +275,7 @@ These functions support the following properties: * `partition_field_name` (optional) For more information about those properties, see -{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. +{ref}/ml-job-resource.html#ml-detectorconfig[Detector configuration objects]. .Example 9: Analyzing response times with the varp function [source,js] @@ -286,10 +288,10 @@ For more information about those properties, see -------------------------------------------------- // NOTCONSOLE -If you use this `varp` function in a detector in your job, it models the -variance in values of `responsetime` for each application over time. It detects -when the variance in `responsetime` is unusual compared to past application -behavior. +If you use this `varp` function in a detector in your {anomaly-job}, it models +the variance in values of `responsetime` for each application over time. It +detects when the variance in `responsetime` is unusual compared to past +application behavior. .Example 10: Analyzing response times with the high_varp function [source,js] @@ -302,10 +304,10 @@ behavior. -------------------------------------------------- // NOTCONSOLE -If you use this `high_varp` function in a detector in your job, it models the -variance in values of `responsetime` for each application over time. It detects -when the variance in `responsetime` is unusual compared to past application -behavior. 
+If you use this `high_varp` function in a detector in your {anomaly-job}, it +models the variance in values of `responsetime` for each application over time. +It detects when the variance in `responsetime` is unusual compared to past +application behavior. .Example 11: Analyzing response times with the low_varp function [source,js] @@ -318,7 +320,7 @@ behavior. -------------------------------------------------- // NOTCONSOLE -If you use this `low_varp` function in a detector in your job, it models the -variance in values of `responsetime` for each application over time. It detects -when the variance in `responsetime` is unusual compared to past application -behavior. +If you use this `low_varp` function in a detector in your {anomaly-job}, it +models the variance in values of `responsetime` for each application over time. +It detects when the variance in `responsetime` is unusual compared to past +application behavior. diff --git a/docs/reference/ml/anomaly-detection/functions/rare.asciidoc b/docs/reference/ml/anomaly-detection/functions/rare.asciidoc index 87c212fbd1275..3e9d0914c577b 100644 --- a/docs/reference/ml/anomaly-detection/functions/rare.asciidoc +++ b/docs/reference/ml/anomaly-detection/functions/rare.asciidoc @@ -13,8 +13,8 @@ number of times (frequency) rare values occur. ==== * The `rare` and `freq_rare` functions should not be used in conjunction with `exclude_frequent`. -* You cannot create forecasts for jobs that contain `rare` or `freq_rare` -functions. +* You cannot create forecasts for {anomaly-jobs} that contain `rare` or +`freq_rare` functions. * You cannot add rules with conditions to detectors that use `rare` or `freq_rare` functions. * Shorter bucket spans (less than 1 hour, for example) are recommended when @@ -47,7 +47,7 @@ This function supports the following properties: * `partition_field_name` (optional) For more information about those properties, see -{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. +{ref}/ml-job-resource.html#ml-detectorconfig[Detector configuration objects]. .Example 1: Analyzing status codes with the rare function [source,js] @@ -59,10 +59,11 @@ For more information about those properties, see -------------------------------------------------- // NOTCONSOLE -If you use this `rare` function in a detector in your job, it detects values -that are rare in time. It models status codes that occur over time and detects -when rare status codes occur compared to the past. For example, you can detect -status codes in a web access log that have never (or rarely) occurred before. +If you use this `rare` function in a detector in your {anomaly-job}, it detects +values that are rare in time. It models status codes that occur over time and +detects when rare status codes occur compared to the past. For example, you can +detect status codes in a web access log that have never (or rarely) occurred +before. .Example 2: Analyzing status codes in a population with the rare function [source,js] @@ -75,15 +76,15 @@ status codes in a web access log that have never (or rarely) occurred before. -------------------------------------------------- // NOTCONSOLE -If you use this `rare` function in a detector in your job, it detects values -that are rare in a population. It models status code and client IP interactions -that occur. It defines a rare status code as one that occurs for few client IP -values compared to the population. 
It detects client IP values that experience
-one or more distinct rare status codes compared to the population. For example
-in a web access log, a `clientip` that experiences the highest number of
-different rare status codes compared to the population is regarded as highly
-anomalous. This analysis is based on the number of different status code values,
-not the count of occurrences.
+If you use this `rare` function in a detector in your {anomaly-job}, it detects
+values that are rare in a population. It models status code and client IP
+interactions that occur. It defines a rare status code as one that occurs for
+few client IP values compared to the population. It detects client IP values
+that experience one or more distinct rare status codes compared to the
+population. For example, in a web access log, a `clientip` that experiences the
+highest number of different rare status codes compared to the population is
+regarded as highly anomalous. This analysis is based on the number of different
+status code values, not the count of occurrences.
NOTE: To define a status code as rare the {ml-features} look at the number of
distinct status codes that occur, not the number of times the status code
occurs.
@@ -105,7 +106,7 @@ This function supports the following properties:
* `partition_field_name` (optional)
For more information about those properties, see
-{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects].
+{ref}/ml-job-resource.html#ml-detectorconfig[Detector configuration objects].
.Example 3: Analyzing URI values in a population with the freq_rare function
[source,js]
--------------------------------------------------
@@ -118,7 +119,7 @@ For more information about those properties, see
--------------------------------------------------
// NOTCONSOLE
-If you use this `freq_rare` function in a detector in your job, it
+If you use this `freq_rare` function in a detector in your {anomaly-job}, it
detects values that are frequently rare in a population. It models URI paths
and client IP interactions that occur. It defines a rare URI path as one that
is visited by few client IP values compared to the population. It detects the
diff --git a/docs/reference/ml/anomaly-detection/functions/sum.asciidoc b/docs/reference/ml/anomaly-detection/functions/sum.asciidoc
index 9313a60a01a6c..260fc3f726c53 100644
--- a/docs/reference/ml/anomaly-detection/functions/sum.asciidoc
+++ b/docs/reference/ml/anomaly-detection/functions/sum.asciidoc
@@ -2,7 +2,8 @@
[[ml-sum-functions]]
=== Sum functions
-The sum functions detect anomalies when the sum of a field in a bucket is anomalous.
+The sum functions detect anomalies when the sum of a field in a bucket is
+anomalous.
If you want to monitor unusually high totals, use high-sided functions.
@@ -35,7 +36,7 @@ These functions support the following properties:
* `partition_field_name` (optional)
For more information about those properties, see
-{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects].
+{ref}/ml-job-resource.html#ml-detectorconfig[Detector configuration objects].
.Example 1: Analyzing total expenses with the sum function
[source,js]
--------------------------------------------------
@@ -49,7 +50,7 @@ For more information about those properties, see
--------------------------------------------------
// NOTCONSOLE
-If you use this `sum` function in a detector in your job, it
+If you use this `sum` function in a detector in your {anomaly-job}, it
models total expenses per employees for each cost center.
For each time bucket, it detects when an employee’s expenses are unusual for a
cost center compared to other employees.
@@ -65,7 +66,7 @@ to other employees.
--------------------------------------------------
// NOTCONSOLE
-If you use this `high_sum` function in a detector in your job, it
+If you use this `high_sum` function in a detector in your {anomaly-job}, it
models total `cs_bytes`. It detects `cs_hosts` that transfer unusually high
volumes compared to other `cs_hosts`. This example looks for volumes of data
transferred from a client to a server on the internet that are unusual compared
@@ -91,7 +92,7 @@ These functions support the following properties:
* `partition_field_name` (optional)
For more information about those properties, see
-{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects].
+{ref}/ml-job-resource.html#ml-detectorconfig[Detector configuration objects].
NOTE: Population analysis (that is to say, use of the `over_field_name` property)
is not applicable for this function.
@@ -107,9 +108,7 @@ is not applicable for this function.
--------------------------------------------------
// NOTCONSOLE
-If you use this `high_non_null_sum` function in a detector in your job, it
-models the total `amount_approved` for each employee. It ignores any buckets
+If you use this `high_non_null_sum` function in a detector in your {anomaly-job},
+it models the total `amount_approved` for each employee. It ignores any buckets
where the amount is null. It detects employees who approve unusually high
amounts compared to their past behavior.
-//For this credit control system analysis, using non_null_sum will ignore
-//periods where the employees are not active on the system.
diff --git a/docs/reference/ml/anomaly-detection/functions/time.asciidoc b/docs/reference/ml/anomaly-detection/functions/time.asciidoc
index 026d29d85d3d7..422b4e995ec73 100644
--- a/docs/reference/ml/anomaly-detection/functions/time.asciidoc
+++ b/docs/reference/ml/anomaly-detection/functions/time.asciidoc
@@ -14,22 +14,25 @@ The {ml-features} include the following time functions:
[NOTE]
====
-* NOTE: You cannot create forecasts for jobs that contain time functions.
-* The `time_of_day` function is not aware of the difference between days, for instance
-work days and weekends. When modeling different days, use the `time_of_week` function.
-In general, the `time_of_week` function is more suited to modeling the behavior of people
-rather than machines, as people vary their behavior according to the day of the week.
-* Shorter bucket spans (for example, 10 minutes) are recommended when performing a
-`time_of_day` or `time_of_week` analysis. The time of the events being modeled are not
-affected by the bucket span, but a shorter bucket span enables quicker alerting on unusual
-events.
-* Unusual events are flagged based on the previous pattern of the data, not on what we
-might think of as unusual based on human experience. So, if events typically occur
-between 3 a.m. and 5 a.m., and event occurring at 3 p.m. is be flagged as unusual.
-* When Daylight Saving Time starts or stops, regular events can be flagged as anomalous.
-This situation occurs because the actual time of the event (as measured against a UTC
-baseline) has changed. This situation is treated as a step change in behavior and the new
-times will be learned quickly.
+* You cannot create forecasts for {anomaly-jobs} that contain time
+functions.
+* The `time_of_day` function is not aware of the difference between days, for
+instance work days and weekends. When modeling different days, use the
+`time_of_week` function. In general, the `time_of_week` function is more suited
+to modeling the behavior of people rather than machines, as people vary their
+behavior according to the day of the week.
+* Shorter bucket spans (for example, 10 minutes) are recommended when performing
+a `time_of_day` or `time_of_week` analysis. The times of the events being modeled
+are not affected by the bucket span, but a shorter bucket span enables quicker
+alerting on unusual events.
+* Unusual events are flagged based on the previous pattern of the data, not on
+what we might think of as unusual based on human experience. So, if events
+typically occur between 3 a.m. and 5 a.m., an event occurring at 3 p.m. is
+flagged as unusual.
+* When Daylight Saving Time starts or stops, regular events can be flagged as
+anomalous. This situation occurs because the actual time of the event (as
+measured against a UTC baseline) has changed. This situation is treated as a
+step change in behavior and the new times will be learned quickly.
====
[float]
@@ -51,7 +54,7 @@ This function supports the following properties:
* `partition_field_name` (optional)
For more information about those properties, see
-{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects].
+{ref}/ml-job-resource.html#ml-detectorconfig[Detector configuration objects].
.Example 1: Analyzing events with the time_of_day function
[source,js]
--------------------------------------------------
@@ -63,7 +66,7 @@ For more information about those properties, see
--------------------------------------------------
// NOTCONSOLE
-If you use this `time_of_day` function in a detector in your job, it
+If you use this `time_of_day` function in a detector in your {anomaly-job}, it
models when events occur throughout a day for each process. It detects when an
event occurs for a process that is at an unusual time in the day compared to
its past behavior.
@@ -82,7 +85,7 @@ This function supports the following properties:
* `partition_field_name` (optional)
For more information about those properties, see
-{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects].
+{ref}/ml-job-resource.html#ml-detectorconfig[Detector configuration objects].
.Example 2: Analyzing events with the time_of_week function
[source,js]
--------------------------------------------------
@@ -95,7 +98,7 @@ For more information about those properties, see
--------------------------------------------------
// NOTCONSOLE
-If you use this `time_of_week` function in a detector in your job, it
+If you use this `time_of_week` function in a detector in your {anomaly-job}, it
models when events occur throughout the week for each `eventcode`. It detects
when a workstation event occurs at an unusual time during the week for that
`eventcode` compared to other workstations. It detects events for a
diff --git a/docs/reference/ml/anomaly-detection/populations.asciidoc b/docs/reference/ml/anomaly-detection/populations.asciidoc
index df1771039c098..febbbb29e5ee0 100644
--- a/docs/reference/ml/anomaly-detection/populations.asciidoc
+++ b/docs/reference/ml/anomaly-detection/populations.asciidoc
@@ -57,9 +57,9 @@ PUT _ml/anomaly_detectors/population
in each bucket.
If your data is stored in {es}, you can use the population job wizard in {kib}
-to create a job with these same properties.
For example, if you add the sample -web logs in {kib}, you can use the following job settings in the population job -wizard: +to create an {anomaly-job} with these same properties. For example, if you add +the sample web logs in {kib}, you can use the following job settings in the +population job wizard: [role="screenshot"] image::images/ml-population-job.jpg["Job settings in the population job wizard] diff --git a/docs/reference/ml/anomaly-detection/stopping-ml.asciidoc b/docs/reference/ml/anomaly-detection/stopping-ml.asciidoc index ddb3919dd5281..454d8497634dc 100644 --- a/docs/reference/ml/anomaly-detection/stopping-ml.asciidoc +++ b/docs/reference/ml/anomaly-detection/stopping-ml.asciidoc @@ -1,22 +1,22 @@ [role="xpack"] [[stopping-ml]] -== Stopping machine learning +== Stopping {ml} {anomaly-detect} -An orderly shutdown of {ml} ensures that: +An orderly shutdown ensures that: * {dfeeds-cap} are stopped * Buffers are flushed * Model history is pruned * Final results are calculated * Model snapshots are saved -* Jobs are closed +* {anomaly-jobs-cap} are closed This process ensures that jobs are in a consistent state in case you want to subsequently re-open them. [float] [[stopping-ml-datafeeds]] -=== Stopping {dfeeds-cap} +=== Stopping {dfeeds} When you stop a {dfeed}, it ceases to retrieve data from {es}. You can stop a {dfeed} by using {kib} or the @@ -25,7 +25,7 @@ request stops the `feed1` {dfeed}: [source,js] -------------------------------------------------- -POST _ml/datafeeds/datafeed-total-requests/_stop +POST _ml/datafeeds/feed1/_stop -------------------------------------------------- // CONSOLE // TEST[skip:setup:server_metrics_startdf] @@ -39,7 +39,7 @@ A {dfeed} can be started and stopped multiple times throughout its lifecycle. [float] [[stopping-all-ml-datafeeds]] -==== Stopping All {dfeeds-cap} +==== Stopping all {dfeeds} If you are upgrading your cluster, you can use the following request to stop all {dfeeds}: @@ -53,19 +53,20 @@ POST _ml/datafeeds/_all/_stop [float] [[closing-ml-jobs]] -=== Closing Jobs +=== Closing {anomaly-jobs} -When you close a job, it cannot receive data or perform analysis operations. -If a job is associated with a {dfeed}, you must stop the {dfeed} before you can -close the jobs. If the {dfeed} has an end date, the job closes automatically on -that end date. +When you close an {anomaly-job}, it cannot receive data or perform analysis +operations. If a job is associated with a {dfeed}, you must stop the {dfeed} +before you can close the job. If the {dfeed} has an end date, the job closes +automatically on that end date. -You can close a job by using the {ref}/ml-close-job.html[close job API]. For +You can close a job by using the +{ref}/ml-close-job.html[close {anomaly-job} API]. For example, the following request closes the `job1` job: [source,js] -------------------------------------------------- -POST _ml/anomaly_detectors/total-requests/_close +POST _ml/anomaly_detectors/job1/_close -------------------------------------------------- // CONSOLE // TEST[skip:setup:server_metrics_openjob] @@ -73,14 +74,15 @@ POST _ml/anomaly_detectors/total-requests/_close NOTE: You must have `manage_ml`, or `manage` cluster privileges to stop {dfeeds}. For more information, see <>. -A job can be opened and closed multiple times throughout its lifecycle. +{anomaly-jobs-cap} can be opened and closed multiple times throughout their +lifecycle. 
[float] [[closing-all-ml-datafeeds]] -==== Closing All Jobs +==== Closing all {anomaly-jobs} If you are upgrading your cluster, you can use the following request to close -all open jobs on the cluster: +all open {anomaly-jobs} on the cluster: [source,js] ---------------------------------- diff --git a/docs/reference/ml/anomaly-detection/transforms.asciidoc b/docs/reference/ml/anomaly-detection/transforms.asciidoc index 9c11206f8bf2f..014a9ba9744f2 100644 --- a/docs/reference/ml/anomaly-detection/transforms.asciidoc +++ b/docs/reference/ml/anomaly-detection/transforms.asciidoc @@ -7,9 +7,9 @@ it is analyzed. {dfeeds-cap} contain an optional `script_fields` property, where you can specify scripts that evaluate custom expressions and return script fields. -If your {dfeed} defines script fields, you can use those fields in your job. -For example, you can use the script fields in the analysis functions in one or -more detectors. +If your {dfeed} defines script fields, you can use those fields in your +{anomaly-job}. For example, you can use the script fields in the analysis +functions in one or more detectors. * <> * <> @@ -146,12 +146,14 @@ PUT _ml/datafeeds/datafeed-test1 within the job. <2> The script field is defined in the {dfeed}. -This `test1` job contains a detector that uses a script field in a mean analysis -function. The `datafeed-test1` {dfeed} defines the script field. It contains a -script that adds two fields in the document to produce a "total" error count. +This `test1` {anomaly-job} contains a detector that uses a script field in a +mean analysis function. The `datafeed-test1` {dfeed} defines the script field. +It contains a script that adds two fields in the document to produce a "total" +error count. The syntax for the `script_fields` property is identical to that used by {es}. -For more information, see {ref}/search-request-body.html#request-body-search-script-fields[Script Fields]. +For more information, see +{ref}/search-request-body.html#request-body-search-script-fields[Script fields]. You can preview the contents of the {dfeed} by using the following API: @@ -181,15 +183,15 @@ insufficient data to generate meaningful results. //For a full demonstration of //how to create jobs with sample data, see <>. -You can alternatively use {kib} to create an advanced job that uses script -fields. To add the `script_fields` property to your {dfeed}, you must use the -**Edit JSON** tab. For example: +You can alternatively use {kib} to create an advanced {anomaly-job} that uses +script fields. To add the `script_fields` property to your {dfeed}, you must use +the **Edit JSON** tab. For example: [role="screenshot"] image::images/ml-scriptfields.jpg[Adding script fields to a {dfeed} in {kib}] [[ml-configuring-transform-examples]] -==== Common Script Field Examples +==== Common script field examples While the possibilities are limitless, there are a number of common scenarios where you might use script fields in your {dfeeds}. @@ -199,7 +201,7 @@ where you might use script fields in your {dfeeds}. Some of these examples use regular expressions. By default, regular expressions are disabled because they circumvent the protection that Painless provides against long running and memory hungry scripts. For more information, -see {ref}/modules-scripting-painless.html[Painless Scripting Language]. +see {ref}/modules-scripting-painless.html[Painless scripting language]. Machine learning analysis is case sensitive. For example, "John" is considered to be different than "john". 
This is one reason you might consider using scripts From 20284b8fae5e567621f9bda33547923081c80f4f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Perlak?= Date: Fri, 26 Jul 2019 21:40:27 +0200 Subject: [PATCH 50/51] Optimize Min and Max BKD optimizations (#44315) MinAggregator - skip BKD optimization when no result found after 1024 lookups. MaxAggregator - skip unnecessary conversions. --- .../search/aggregations/metrics/MaxAggregator.java | 11 +++++++---- .../search/aggregations/metrics/MinAggregator.java | 6 ++++++ 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregator.java index 02083177099a5..27b21a1ebd896 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregator.java @@ -22,9 +22,9 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.PointValues; import org.apache.lucene.search.CollectionTerminatedException; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.Bits; import org.apache.lucene.util.FutureArrays; -import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.DoubleArray; @@ -174,7 +174,7 @@ static Number findLeafMaxValue(LeafReader reader, String fieldName, Function MAX_BKD_LOOKUPS) { + throw new CollectionTerminatedException(); + } } @Override From 3f384634d6baa4cd2a2c8363d56a2cbb6601244c Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Fri, 26 Jul 2019 22:27:13 +0200 Subject: [PATCH 51/51] Guard open connection call in RemoteClusterConnection (#44921) Fixes an issue where a call to openConnection was not properly guarded, allowing an exception to bubble up to the uncaught exception handler, causing test failures. 
Closes #44912 --- .../transport/RemoteClusterConnection.java | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index 4205dcbddaba5..73d9d09473e5f 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -432,14 +432,6 @@ private void collectRemoteNodes(Iterator> seedNodes, Act } if (seedNodes.hasNext()) { - final DiscoveryNode seedNode = maybeAddProxyAddress(proxyAddress, seedNodes.next().get()); - logger.debug("[{}] opening connection to seed node: [{}] proxy address: [{}]", clusterAlias, seedNode, - proxyAddress); - final ConnectionProfile profile = ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG); - - final StepListener openConnectionStep = new StepListener<>(); - connectionManager.openConnection(seedNode, profile, openConnectionStep); - final Consumer onFailure = e -> { if (e instanceof ConnectTransportException || e instanceof IOException || @@ -456,6 +448,17 @@ private void collectRemoteNodes(Iterator> seedNodes, Act listener.onFailure(e); }; + final DiscoveryNode seedNode = maybeAddProxyAddress(proxyAddress, seedNodes.next().get()); + logger.debug("[{}] opening connection to seed node: [{}] proxy address: [{}]", clusterAlias, seedNode, + proxyAddress); + final ConnectionProfile profile = ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG); + final StepListener openConnectionStep = new StepListener<>(); + try { + connectionManager.openConnection(seedNode, profile, openConnectionStep); + } catch (Exception e) { + onFailure.accept(e); + } + final StepListener handShakeStep = new StepListener<>(); openConnectionStep.whenComplete(connection -> { ConnectionProfile connectionProfile = connectionManager.getConnectionProfile();
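
The `MinAggregator` half of the BKD change above is easier to follow outside the
diff. The following standalone Java sketch illustrates the lookup-budget
technique; it assumes a single-dimension long point field, whose BKD leaves are
visited in ascending value order, and it uses the 1024-lookup budget named in
the commit message. The class and method names are invented for illustration
and are not the actual Elasticsearch code.

[source,java]
--------------------------------------------------
import java.io.IOException;

import org.apache.lucene.document.LongPoint;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.PointValues;
import org.apache.lucene.search.CollectionTerminatedException;
import org.apache.lucene.util.Bits;

final class BkdMinWithBudget {

    /** Give up on the BKD shortcut after this many deleted-doc lookups (budget from the commit message). */
    private static final int MAX_BKD_LOOKUPS = 1024;

    /**
     * Returns the minimum live value of a 1D long point field, or null if the
     * caller should fall back to the regular doc-values collection path.
     */
    static Long findLiveMin(LeafReader reader, String field, Bits liveDocs) throws IOException {
        final PointValues points = reader.getPointValues(field);
        if (points == null) {
            return null; // field is not indexed with points in this segment
        }
        if (liveDocs == null) {
            // No deletes: the tree's global minimum is the answer, no per-doc checks needed.
            return LongPoint.decodeDimension(points.getMinPackedValue(), 0);
        }
        final long[] min = new long[1];
        final boolean[] found = new boolean[1];
        final int[] lookups = new int[1];
        try {
            points.intersect(new PointValues.IntersectVisitor() {
                @Override
                public void visit(int docID) {
                    throw new IllegalStateException("only per-value visits expected");
                }

                @Override
                public void visit(int docID, byte[] packedValue) {
                    // 1D BKD leaves are visited in ascending value order, so the
                    // first live document carries the minimum.
                    if (liveDocs.get(docID)) {
                        min[0] = LongPoint.decodeDimension(packedValue, 0);
                        found[0] = true;
                        throw new CollectionTerminatedException();
                    }
                    if (++lookups[0] > MAX_BKD_LOOKUPS) {
                        // Too many deleted docs in a row: stop scanning so the
                        // "optimization" cannot degrade into a full tree walk.
                        throw new CollectionTerminatedException();
                    }
                }

                @Override
                public PointValues.Relation compare(byte[] minPacked, byte[] maxPacked) {
                    return PointValues.Relation.CELL_CROSSES_QUERY; // inspect every leaf
                }
            });
        } catch (CollectionTerminatedException expected) {
            // thrown on both the early-success and the budget-exhausted paths
        }
        return found[0] ? min[0] : null;
    }
}
--------------------------------------------------

Bounding the probe matters because the shortcut only pays off when a live
document appears near the start of the tree walk; in a segment whose smallest
values all belong to deleted documents, an unbounded walk would visit every
value before giving up.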
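
The `RemoteClusterConnection` fix is an instance of a general pattern: an API
that reports failures through a listener can still throw synchronously (for
example, from argument validation), so the call site must funnel both failure
paths into the same handler. Below is a minimal sketch of that pattern with
invented `Listener` and `ConnectionManager` stand-ins in place of the real
`StepListener` and connection manager.

[source,java]
--------------------------------------------------
import java.util.function.Consumer;

final class GuardedOpenConnection {

    interface Listener<T> {
        void onResponse(T value);
        void onFailure(Exception e);
    }

    interface ConnectionManager {
        /** May report failure via the listener, but may also throw synchronously. */
        void openConnection(String seedNode, Listener<String> listener);
    }

    static void connectToSeed(ConnectionManager manager, String seedNode, Consumer<Exception> onFailure) {
        Listener<String> openConnectionStep = new Listener<String>() {
            @Override
            public void onResponse(String connection) {
                // continue with the handshake step
            }

            @Override
            public void onFailure(Exception e) {
                onFailure.accept(e); // asynchronous failure path
            }
        };
        try {
            manager.openConnection(seedNode, openConnectionStep);
        } catch (Exception e) {
            // Without this guard a synchronous throw would bypass the listener
            // and reach the thread's uncaught-exception handler; route it into
            // the same failure handling (retry next seed, or fail the request).
            onFailure.accept(e);
        }
    }
}
--------------------------------------------------

Routing the synchronous throw into `onFailure` keeps the retry logic in one
place and prevents the exception from escaping to the uncaught-exception
handler, which was exactly the test-failure mode described in the commit
message.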