diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy index de52d75c6008c..5eec829dfa1ba 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy @@ -24,6 +24,7 @@ import org.elasticsearch.gradle.BuildPlugin import org.gradle.api.Plugin import org.gradle.api.Project import org.gradle.api.plugins.JavaBasePlugin +import org.gradle.api.tasks.compile.JavaCompile /** * Configures the build to compile against Elasticsearch's test framework and @@ -49,5 +50,12 @@ public class StandaloneTestPlugin implements Plugin { test.testClassesDir project.sourceSets.test.output.classesDir test.mustRunAfter(project.precommit) project.check.dependsOn(test) + + project.tasks.withType(JavaCompile) { + // This will be the default in Gradle 5.0 + if (options.compilerArgs.contains("-processor") == false) { + options.compilerArgs << '-proc:none' + } + } } } diff --git a/distribution/src/bin/elasticsearch-cli b/distribution/src/bin/elasticsearch-cli new file mode 100644 index 0000000000000..94f8f763bb1c6 --- /dev/null +++ b/distribution/src/bin/elasticsearch-cli @@ -0,0 +1,22 @@ +#!/bin/bash + +set -e -o pipefail + +source "`dirname "$0"`"/elasticsearch-env + +IFS=';' read -r -a additional_sources <<< "$ES_ADDITIONAL_SOURCES" +for additional_source in "${additional_sources[@]}" +do + source "`dirname "$0"`"/$additional_source +done + +exec \ + "$JAVA" \ + $ES_JAVA_OPTS \ + -Des.path.home="$ES_HOME" \ + -Des.path.conf="$ES_PATH_CONF" \ + -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ + -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ + -cp "$ES_CLASSPATH" \ + $1 \ + "${@:2}" diff --git a/distribution/src/bin/elasticsearch-keystore b/distribution/src/bin/elasticsearch-keystore index aee62dfde50d4..ebe24179a0ea5 100755 --- 
a/distribution/src/bin/elasticsearch-keystore +++ b/distribution/src/bin/elasticsearch-keystore @@ -1,14 +1,5 @@ #!/bin/bash -source "`dirname "$0"`"/elasticsearch-env - -exec \ - "$JAVA" \ - $ES_JAVA_OPTS \ - -Des.path.home="$ES_HOME" \ - -Des.path.conf="$ES_PATH_CONF" \ - -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ - -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ - -cp "$ES_CLASSPATH" \ +"`dirname "$0"`"/elasticsearch-cli \ org.elasticsearch.common.settings.KeyStoreCli \ "$@" diff --git a/distribution/src/bin/elasticsearch-plugin b/distribution/src/bin/elasticsearch-plugin index 500fd710c1aea..67b6ea7e13c37 100755 --- a/distribution/src/bin/elasticsearch-plugin +++ b/distribution/src/bin/elasticsearch-plugin @@ -1,14 +1,5 @@ #!/bin/bash -source "`dirname "$0"`"/elasticsearch-env - -exec \ - "$JAVA" \ - $ES_JAVA_OPTS \ - -Des.path.home="$ES_HOME" \ - -Des.path.conf="$ES_PATH_CONF" \ - -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ - -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ - -cp "$ES_CLASSPATH" \ +"`dirname "$0"`"/elasticsearch-cli \ org.elasticsearch.plugins.PluginCli \ "$@" diff --git a/distribution/src/bin/elasticsearch-translog b/distribution/src/bin/elasticsearch-translog index e176231c6f44d..33350aaf0b65f 100755 --- a/distribution/src/bin/elasticsearch-translog +++ b/distribution/src/bin/elasticsearch-translog @@ -1,14 +1,5 @@ #!/bin/bash -source "`dirname "$0"`"/elasticsearch-env - -exec \ - "$JAVA" \ - $ES_JAVA_OPTS \ - -Des.path.home="$ES_HOME" \ - -Des.path.conf="$ES_PATH_CONF" \ - -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ - -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ - -cp "$ES_CLASSPATH" \ +"`dirname "$0"`"/elasticsearch-cli \ org.elasticsearch.index.translog.TranslogToolCli \ "$@" diff --git a/docs/painless/painless-execute-script.asciidoc b/docs/painless/painless-execute-script.asciidoc index 7997c87e3e45f..3344bd9f75132 100644 --- a/docs/painless/painless-execute-script.asciidoc +++ 
b/docs/painless/painless-execute-script.asciidoc @@ -1,6 +1,8 @@ [[painless-execute-api]] === Painless execute API +experimental[The painless execute api is new and the request / response format may change in a breaking way in the future] + The Painless execute API allows an arbitrary script to be executed and a result to be returned. [[painless-execute-api-parameters]] diff --git a/docs/reference/indices/create-index.asciidoc b/docs/reference/indices/create-index.asciidoc index f2882e6fb60d4..32a454624990c 100644 --- a/docs/reference/indices/create-index.asciidoc +++ b/docs/reference/indices/create-index.asciidoc @@ -25,7 +25,7 @@ PUT twitter } -------------------------------------------------- // CONSOLE -<1> Default for `number_of_shards` is 5 +<1> Default for `number_of_shards` is 1 <2> Default for `number_of_replicas` is 1 (ie one replica for each primary shard) The above second curl example shows how an index called `twitter` can be diff --git a/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java b/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java index 5a146f75919ce..b910526ef3d98 100644 --- a/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java +++ b/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java @@ -54,7 +54,7 @@ public String getType() { @Override public T compile(String scriptName, String scriptSource, ScriptContext context, Map params) { - if (context.equals(SearchScript.CONTEXT) == false) { + if (context.equals(SearchScript.SCRIPT_SCORE_CONTEXT) == false) { throw new IllegalArgumentException(getType() + " scripts cannot be used for context [" + context.name + "]"); } // we use the script "source" as the script identifier diff --git a/qa/rolling-upgrade/build.gradle 
b/qa/rolling-upgrade/build.gradle index 9f10c5dcfab73..6c4004074fa3b 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -30,6 +30,26 @@ task bwcTest { } for (Version version : bwcVersions.wireCompatible) { + /* + * The goal here is to: + *
    + *
  • start three nodes on the old version + *
  • run tests with systemProperty 'tests.rest.suite', 'old_cluster' + *
  • shut down one node + *
  • start a node with the new version + *
  • run tests with systemProperty 'tests.rest.suite', 'mixed_cluster' + *
  • shut down one node on the old version + *
  • start a node with the new version + *
  • run tests with systemProperty 'tests.rest.suite', 'mixed_cluster' again + *
  • shut down the last node with the old version + *
  • start a node with the new version + *
  • run tests with systemProperty 'tests.rest.suite', 'upgraded_cluster' + *
  • shut down the entire cluster + *
+ * + * Be careful: gradle dry run spits out tasks in the wrong order but, + * strangely, running the tasks works properly. + */ String baseName = "v${version}" Task oldClusterTest = tasks.create(name: "${baseName}#oldClusterTest", type: RestIntegTestTask) { @@ -39,8 +59,8 @@ for (Version version : bwcVersions.wireCompatible) { Object extension = extensions.findByName("${baseName}#oldClusterTestCluster") configure(extensions.findByName("${baseName}#oldClusterTestCluster")) { bwcVersion = version - numBwcNodes = 2 - numNodes = 2 + numBwcNodes = 3 + numNodes = 3 clusterName = 'rolling-upgrade' setting 'repositories.url.allowed_urls', 'http://snapshot.test*' if (version.onOrAfter('5.3.0')) { @@ -53,43 +73,57 @@ for (Version version : bwcVersions.wireCompatible) { systemProperty 'tests.rest.suite', 'old_cluster' } - Task mixedClusterTest = tasks.create(name: "${baseName}#mixedClusterTest", type: RestIntegTestTask) + Closure configureUpgradeCluster = {String name, Task lastRunner, int stopNode, Closure unicastSeed -> + configure(extensions.findByName("${baseName}#${name}")) { + dependsOn lastRunner, "${baseName}#oldClusterTestCluster#node${stopNode}.stop" + clusterName = 'rolling-upgrade' + unicastTransportUri = { seedNode, node, ant -> unicastSeed() } + minimumMasterNodes = { 3 } + /* Override the data directory so the new node always gets the node we + * just stopped's data directory. */ + dataDir = { nodeNumber -> oldClusterTest.nodes[stopNode].dataDir } + setting 'repositories.url.allowed_urls', 'http://snapshot.test*' + } + } - configure(extensions.findByName("${baseName}#mixedClusterTestCluster")) { - dependsOn oldClusterTestRunner, "${baseName}#oldClusterTestCluster#node1.stop" - clusterName = 'rolling-upgrade' - unicastTransportUri = { seedNode, node, ant -> oldClusterTest.nodes.get(0).transportUri() } - minimumMasterNodes = { 2 } - /* Override the data directory so the new node always gets the node we - * just stopped's data directory. 
*/ - dataDir = { nodeNumber -> oldClusterTest.nodes[1].dataDir } - setting 'repositories.url.allowed_urls', 'http://snapshot.test*' + Task oneThirdUpgradedTest = tasks.create(name: "${baseName}#oneThirdUpgradedTest", type: RestIntegTestTask) + + configureUpgradeCluster("oneThirdUpgradedTestCluster", oldClusterTestRunner, + 0, { oldClusterTest.nodes.get(1).transportUri() }) + + Task oneThirdUpgradedTestRunner = tasks.getByName("${baseName}#oneThirdUpgradedTestRunner") + oneThirdUpgradedTestRunner.configure { + systemProperty 'tests.rest.suite', 'mixed_cluster' + systemProperty 'tests.first_round', 'true' + finalizedBy "${baseName}#oldClusterTestCluster#node1.stop" } - Task mixedClusterTestRunner = tasks.getByName("${baseName}#mixedClusterTestRunner") - mixedClusterTestRunner.configure { + Task twoThirdsUpgradedTest = tasks.create(name: "${baseName}#twoThirdsUpgradedTest", type: RestIntegTestTask) + + configureUpgradeCluster("twoThirdsUpgradedTestCluster", oneThirdUpgradedTestRunner, + 1, { oneThirdUpgradedTest.nodes.get(0).transportUri() }) + + Task twoThirdsUpgradedTestRunner = tasks.getByName("${baseName}#twoThirdsUpgradedTestRunner") + twoThirdsUpgradedTestRunner.configure { systemProperty 'tests.rest.suite', 'mixed_cluster' - finalizedBy "${baseName}#oldClusterTestCluster#node0.stop" + systemProperty 'tests.first_round', 'false' + finalizedBy "${baseName}#oldClusterTestCluster#node2.stop" } Task upgradedClusterTest = tasks.create(name: "${baseName}#upgradedClusterTest", type: RestIntegTestTask) - configure(extensions.findByName("${baseName}#upgradedClusterTestCluster")) { - dependsOn mixedClusterTestRunner, "${baseName}#oldClusterTestCluster#node0.stop" - clusterName = 'rolling-upgrade' - unicastTransportUri = { seedNode, node, ant -> mixedClusterTest.nodes.get(0).transportUri() } - minimumMasterNodes = { 2 } - /* Override the data directory so the new node always gets the node we - * just stopped's data directory. 
*/ - dataDir = { nodeNumber -> oldClusterTest.nodes[0].dataDir} - setting 'repositories.url.allowed_urls', 'http://snapshot.test*' - } + configureUpgradeCluster("upgradedClusterTestCluster", twoThirdsUpgradedTestRunner, + 2, { twoThirdsUpgradedTest.nodes.get(0).transportUri() }) Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner") upgradedClusterTestRunner.configure { systemProperty 'tests.rest.suite', 'upgraded_cluster' - // only need to kill the mixed cluster tests node here because we explicitly told it to not stop nodes upon completion - finalizedBy "${baseName}#mixedClusterTestCluster#stop" + /* + * Force stopping all the upgraded nodes after the test runner + * so they are alive during the test. + */ + finalizedBy "${baseName}#oneThirdUpgradedTestCluster#stop" + finalizedBy "${baseName}#twoThirdsUpgradedTestCluster#stop" } Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") { diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java new file mode 100644 index 0000000000000..6f4453aa06cc9 --- /dev/null +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java @@ -0,0 +1,91 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.upgrades; + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.elasticsearch.Version; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Response; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.yaml.ObjectPath; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Future; +import java.util.function.Predicate; + +import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength; +import static java.util.Collections.emptyMap; +import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING; +import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.notNullValue; + +public abstract class AbstractRollingTestCase extends ESRestTestCase { + protected enum ClusterType { + OLD, + MIXED, + UPGRADED; + + public static ClusterType 
parse(String value) { + switch (value) { + case "old_cluster": + return OLD; + case "mixed_cluster": + return MIXED; + case "upgraded_cluster": + return UPGRADED; + default: + throw new AssertionError("unknown cluster type: " + value); + } + } + } + + protected static final ClusterType CLUSTER_TYPE = ClusterType.parse(System.getProperty("tests.rest.suite")); + + @Override + protected final boolean preserveIndicesUponCompletion() { + return true; + } + + @Override + protected final boolean preserveReposUponCompletion() { + return true; + } + + @Override + protected final Settings restClientSettings() { + return Settings.builder().put(super.restClientSettings()) + // increase the timeout here to 90 seconds to handle long waits for a green + // cluster health. the waits for green need to be longer than a minute to + // account for delayed shards + .put(ESRestTestCase.CLIENT_RETRY_TIMEOUT, "90s") + .put(ESRestTestCase.CLIENT_SOCKET_TIMEOUT, "90s") + .build(); + } +} diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java new file mode 100644 index 0000000000000..f1e01d24acff6 --- /dev/null +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java @@ -0,0 +1,135 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.upgrades; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; + +/** + * Basic test that indexed documents survive the rolling restart. See + * {@link RecoveryIT} for much more in depth testing of the mechanism + * by which they survive. + */ +public class IndexingIT extends AbstractRollingTestCase { + public void testIndexing() throws IOException { + switch (CLUSTER_TYPE) { + case OLD: + break; + case MIXED: + Request waitForYellow = new Request("GET", "/_cluster/health"); + waitForYellow.addParameter("wait_for_nodes", "3"); + waitForYellow.addParameter("wait_for_status", "yellow"); + client().performRequest(waitForYellow); + break; + case UPGRADED: + Request waitForGreen = new Request("GET", "/_cluster/health/test_index,index_with_replicas,empty_index"); + waitForGreen.addParameter("wait_for_nodes", "3"); + waitForGreen.addParameter("wait_for_status", "green"); + // wait for long enough that we give delayed unassigned shards to stop being delayed + waitForGreen.addParameter("timeout", "70s"); + waitForGreen.addParameter("level", "shards"); + client().performRequest(waitForGreen); + break; + default: + throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); + } + + if (CLUSTER_TYPE == ClusterType.OLD) { + Request createTestIndex = new Request("PUT", "/test_index"); + 
createTestIndex.setJsonEntity("{\"settings\": {\"index.number_of_replicas\": 0}}"); + client().performRequest(createTestIndex); + + String recoverQuickly = "{\"settings\": {\"index.unassigned.node_left.delayed_timeout\": \"100ms\"}}"; + Request createIndexWithReplicas = new Request("PUT", "/index_with_replicas"); + createIndexWithReplicas.setJsonEntity(recoverQuickly); + client().performRequest(createIndexWithReplicas); + + Request createEmptyIndex = new Request("PUT", "/empty_index"); + // Ask for recovery to be quick + createEmptyIndex.setJsonEntity(recoverQuickly); + client().performRequest(createEmptyIndex); + + bulk("test_index", "_OLD", 5); + bulk("index_with_replicas", "_OLD", 5); + } + + int expectedCount; + switch (CLUSTER_TYPE) { + case OLD: + expectedCount = 5; + break; + case MIXED: + if (Booleans.parseBoolean(System.getProperty("tests.first_round"))) { + expectedCount = 5; + } else { + expectedCount = 10; + } + break; + case UPGRADED: + expectedCount = 15; + break; + default: + throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); + } + + assertCount("test_index", expectedCount); + assertCount("index_with_replicas", 5); + assertCount("empty_index", 0); + + if (CLUSTER_TYPE != ClusterType.OLD) { + bulk("test_index", "_" + CLUSTER_TYPE, 5); + Request toBeDeleted = new Request("PUT", "/test_index/doc/to_be_deleted"); + toBeDeleted.addParameter("refresh", "true"); + toBeDeleted.setJsonEntity("{\"f1\": \"delete-me\"}"); + client().performRequest(toBeDeleted); + assertCount("test_index", expectedCount + 6); + + Request delete = new Request("DELETE", "/test_index/doc/to_be_deleted"); + delete.addParameter("refresh", "true"); + client().performRequest(delete); + + assertCount("test_index", expectedCount + 5); + } + } + + private void bulk(String index, String valueSuffix, int count) throws IOException { + StringBuilder b = new StringBuilder(); + for (int i = 0; i < count; i++) { + b.append("{\"index\": {\"_index\": 
\"").append(index).append("\", \"_type\": \"doc\"}}\n"); + b.append("{\"f1\": \"v").append(i).append(valueSuffix).append("\", \"f2\": ").append(i).append("}\n"); + } + Request bulk = new Request("POST", "/_bulk"); + bulk.addParameter("refresh", "true"); + bulk.setJsonEntity(b.toString()); + client().performRequest(bulk); + } + + private void assertCount(String index, int count) throws IOException { + Request searchTestIndexRequest = new Request("POST", "/" + index + "/_search"); + searchTestIndexRequest.addParameter("filter_path", "hits.total"); + Response searchTestIndexResponse = client().performRequest(searchTestIndexRequest); + assertEquals("{\"hits\":{\"total\":" + count + "}}", + EntityUtils.toString(searchTestIndexResponse.getEntity(), StandardCharsets.UTF_8)); + } +} diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index d208a7097a784..350636551d9ad 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -46,53 +46,13 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.notNullValue; -public class RecoveryIT extends ESRestTestCase { - - @Override - protected boolean preserveIndicesUponCompletion() { - return true; - } - - @Override - protected boolean preserveReposUponCompletion() { - return true; - } - - private enum CLUSTER_TYPE { - OLD, - MIXED, - UPGRADED; - - public static CLUSTER_TYPE parse(String value) { - switch (value) { - case "old_cluster": - return OLD; - case "mixed_cluster": - return MIXED; - case "upgraded_cluster": - return UPGRADED; - default: - throw new AssertionError("unknown cluster type: " + value); - } - } - } - - private final CLUSTER_TYPE clusterType = CLUSTER_TYPE.parse(System.getProperty("tests.rest.suite")); - - @Override - protected Settings restClientSettings() { - 
return Settings.builder().put(super.restClientSettings()) - // increase the timeout here to 90 seconds to handle long waits for a green - // cluster health. the waits for green need to be longer than a minute to - // account for delayed shards - .put(ESRestTestCase.CLIENT_RETRY_TIMEOUT, "90s") - .put(ESRestTestCase.CLIENT_SOCKET_TIMEOUT, "90s") - .build(); - } - +/** + * In depth testing of the recovery mechanism during a rolling restart. + */ +public class RecoveryIT extends AbstractRollingTestCase { public void testHistoryUUIDIsGenerated() throws Exception { final String index = "index_history_uuid"; - if (clusterType == CLUSTER_TYPE.OLD) { + if (CLUSTER_TYPE == ClusterType.OLD) { Settings.Builder settings = Settings.builder() .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) @@ -102,7 +62,7 @@ public void testHistoryUUIDIsGenerated() throws Exception { // before timing out .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms"); createIndex(index, settings.build()); - } else if (clusterType == CLUSTER_TYPE.UPGRADED) { + } else if (CLUSTER_TYPE == ClusterType.UPGRADED) { ensureGreen(index); Response response = client().performRequest("GET", index + "/_stats", Collections.singletonMap("level", "shards")); assertOK(response); @@ -157,11 +117,11 @@ public void testRecoveryWithConcurrentIndexing() throws Exception { final Map nodeMap = objectPath.evaluate("nodes"); List nodes = new ArrayList<>(nodeMap.keySet()); - switch (clusterType) { + switch (CLUSTER_TYPE) { case OLD: Settings.Builder settings = Settings.builder() .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2) // if the node with the replica is the first to be restarted, while a replica is still recovering // then delayed allocation will kick in. 
When the node comes back, the master will search for a copy // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN @@ -181,6 +141,7 @@ public void testRecoveryWithConcurrentIndexing() throws Exception { assertOK(client().performRequest("POST", index + "/_refresh")); assertCount(index, "_only_nodes:" + nodes.get(0), 60); assertCount(index, "_only_nodes:" + nodes.get(1), 60); + assertCount(index, "_only_nodes:" + nodes.get(2), 60); // make sure that we can index while the replicas are recovering updateIndexSettings(index, Settings.builder().put(INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "primaries")); break; @@ -191,9 +152,10 @@ public void testRecoveryWithConcurrentIndexing() throws Exception { assertOK(client().performRequest("POST", index + "/_refresh")); assertCount(index, "_only_nodes:" + nodes.get(0), 110); assertCount(index, "_only_nodes:" + nodes.get(1), 110); + assertCount(index, "_only_nodes:" + nodes.get(2), 110); break; default: - throw new IllegalStateException("unknown type " + clusterType); + throw new IllegalStateException("unknown type " + CLUSTER_TYPE); } } @@ -221,11 +183,11 @@ private String getNodeId(Predicate versionPredicate) throws IOException public void testRelocationWithConcurrentIndexing() throws Exception { final String index = "relocation_with_concurrent_indexing"; - switch (clusterType) { + switch (CLUSTER_TYPE) { case OLD: Settings.Builder settings = Settings.builder() .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2) // if the node with the replica is the first to be restarted, while a replica is still recovering // then delayed allocation will kick in. 
When the node comes back, the master will search for a copy // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN @@ -258,7 +220,7 @@ public void testRelocationWithConcurrentIndexing() throws Exception { break; case UPGRADED: updateIndexSettings(index, Settings.builder() - .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2) .put("index.routing.allocation.include._id", (String)null) ); asyncIndexDocs(index, 60, 50).get(); @@ -271,9 +233,10 @@ public void testRelocationWithConcurrentIndexing() throws Exception { assertCount(index, "_only_nodes:" + nodes.get(0), 110); assertCount(index, "_only_nodes:" + nodes.get(1), 110); + assertCount(index, "_only_nodes:" + nodes.get(2), 110); break; default: - throw new IllegalStateException("unknown type " + clusterType); + throw new IllegalStateException("unknown type " + CLUSTER_TYPE); } } diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java index f3c0256b2c3c5..7932328c8c2f6 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java @@ -60,4 +60,3 @@ protected Settings restClientSettings() { .build(); } } - diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml index 0810341db1317..0cce81c8985cd 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml @@ -1,74 +1,8 @@ ---- -"Index data and search on the mixed cluster": - - do: - 
cluster.health: - wait_for_status: yellow - wait_for_nodes: 2 - - - do: - search: - index: test_index - - - match: { hits.total: 5 } # no new indexed data, so expect the original 5 documents from the old cluster - - - do: - search: - index: index_with_replicas - - - match: { hits.total: 5 } # just check we recovered fine - - - do: - bulk: - refresh: true - body: - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v1_mixed", "f2": 5}' - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v2_mixed", "f2": 6}' - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v3_mixed", "f2": 7}' - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v4_mixed", "f2": 8}' - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v5_mixed", "f2": 9}' - - - do: - index: - index: test_index - type: doc - id: d10 - body: {"f1": "v6_mixed", "f2": 10} - - - do: - indices.refresh: - index: test_index - - - do: - search: - index: test_index - - - match: { hits.total: 11 } # 5 docs from old cluster, 6 docs from mixed cluster - - - do: - delete: - index: test_index - type: doc - id: d10 - - - do: - indices.refresh: - index: test_index - - - do: - search: - index: test_index - - - match: { hits.total: 10 } - --- "Verify that we can still find things with the template": - do: search_template: + index: test_search_template body: id: test_search_template params: diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml index 067eba6e4b860..04d85eb607835 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml @@ -1,76 +1,5 @@ --- -"Index data, search, and create things in the cluster state that we'll validate are there after the ugprade": - - do: - indices.create: - index: 
test_index - body: - settings: - index: - number_of_replicas: 0 - - do: - indices.create: - index: index_with_replicas # dummy index to ensure we can recover indices with replicas just fine - body: - # if the node with the replica is the first to be restarted, then delayed - # allocation will kick in, and the cluster health won't return to GREEN - # before timing out - index.unassigned.node_left.delayed_timeout: "100ms" - - - do: - indices.create: - index: empty_index # index to ensure we can recover empty indices - body: - # if the node with the replica is the first to be restarted, then delayed - # allocation will kick in, and the cluster health won't return to GREEN - # before timing out - index.unassigned.node_left.delayed_timeout: "100ms" - - - do: - bulk: - refresh: true - body: - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v1_old", "f2": 0}' - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v2_old", "f2": 1}' - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v3_old", "f2": 2}' - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v4_old", "f2": 3}' - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v5_old", "f2": 4}' - - - do: - bulk: - refresh: true - body: - - '{"index": {"_index": "index_with_replicas", "_type": "doc"}}' - - '{"f1": "d_old"}' - - '{"index": {"_index": "index_with_replicas", "_type": "doc"}}' - - '{"f1": "d_old"}' - - '{"index": {"_index": "index_with_replicas", "_type": "doc"}}' - - '{"f1": "d_old"}' - - '{"index": {"_index": "index_with_replicas", "_type": "doc"}}' - - '{"f1": "d_old"}' - - '{"index": {"_index": "index_with_replicas", "_type": "doc"}}' - - '{"f1": "d_old"}' - - - do: - indices.refresh: - index: test_index,index_with_replicas - - - do: - search: - index: test_index - - - match: { hits.total: 5 } - - - do: - search: - index: index_with_replicas - - - match: { hits.total: 5 } - +"Create things in the cluster state that 
we'll validate are there after the ugprade": - do: snapshot.create_repository: repository: my_repo @@ -91,6 +20,21 @@ } - match: { "acknowledged": true } + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "test_search_template", "_type": "doc"}}' + - '{"f1": "v1_old"}' + - '{"index": {"_index": "test_search_template", "_type": "doc"}}' + - '{"f1": "v2_old"}' + - '{"index": {"_index": "test_search_template", "_type": "doc"}}' + - '{"f1": "v3_old"}' + - '{"index": {"_index": "test_search_template", "_type": "doc"}}' + - '{"f1": "v4_old"}' + - '{"index": {"_index": "test_search_template", "_type": "doc"}}' + - '{"f1": "v5_old"}' + - do: put_script: id: test_search_template @@ -105,6 +49,7 @@ - do: search_template: + index: test_search_template body: id: test_search_template params: diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml index 011db854ecdc6..3e293f91ce12a 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml @@ -1,55 +1,8 @@ ---- -"Index data and search on the upgraded cluster": - - do: - cluster.health: - wait_for_status: green - wait_for_nodes: 2 - # wait for long enough that we give delayed unassigned shards to stop being delayed - timeout: 70s - level: shards - index: test_index,index_with_replicas,empty_index - - - do: - search: - index: test_index - - - match: { hits.total: 10 } # no new indexed data, so expect the original 10 documents from the old and mixed clusters - - - do: - search: - index: index_with_replicas - - - match: { hits.total: 5 } # just check we recovered fine - - - do: - bulk: - refresh: true - body: - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v1_upgraded", "f2": 10}' - - '{"index": {"_index": "test_index", "_type": 
"doc"}}' - - '{"f1": "v2_upgraded", "f2": 11}' - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v3_upgraded", "f2": 12}' - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v4_upgraded", "f2": 13}' - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v5_upgraded", "f2": 14}' - - - do: - indices.refresh: - index: test_index - - - do: - search: - index: test_index - - - match: { hits.total: 15 } # 10 docs from previous clusters plus 5 new docs - --- "Verify that we can still find things with the template": - do: search_template: + index: test_search_template body: id: test_search_template params: diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index d4df9e3fffeda..a2820711da556 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -167,7 +167,7 @@ public class Version implements Comparable, ToXContentFragment { public static final int V_6_2_5_ID = 6020599; public static final Version V_6_2_5 = new Version(V_6_2_5_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); public static final int V_6_3_0_ID = 6030099; - public static final Version V_6_3_0 = new Version(V_6_3_0_ID, org.apache.lucene.util.Version.LUCENE_7_3_0); + public static final Version V_6_3_0 = new Version(V_6_3_0_ID, org.apache.lucene.util.Version.LUCENE_7_3_1); public static final int V_6_4_0_ID = 6040099; public static final Version V_6_4_0 = new Version(V_6_4_0_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); public static final int V_7_0_0_alpha1_ID = 7000001; diff --git a/server/src/main/java/org/elasticsearch/index/query/TermsSetQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/TermsSetQueryBuilder.java index 5caabd445b32e..fbada58f29477 100644 --- a/server/src/main/java/org/elasticsearch/index/query/TermsSetQueryBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/index/query/TermsSetQueryBuilder.java @@ -249,7 +249,8 @@ protected Query doToQuery(QueryShardContext context) throws IOException { IndexNumericFieldData fieldData = context.getForField(msmFieldType); longValuesSource = new FieldValuesSource(fieldData); } else if (minimumShouldMatchScript != null) { - SearchScript.Factory factory = context.getScriptService().compile(minimumShouldMatchScript, SearchScript.CONTEXT); + SearchScript.Factory factory = context.getScriptService().compile(minimumShouldMatchScript, + SearchScript.TERMS_SET_QUERY_CONTEXT); Map params = new HashMap<>(); params.putAll(minimumShouldMatchScript.getParams()); params.put("num_terms", queries.size()); diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java b/server/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java index cc89518154d12..ed4c5f5a26952 100644 --- a/server/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java @@ -92,7 +92,7 @@ protected int doHashCode() { @Override protected ScoreFunction doToFunction(QueryShardContext context) { try { - SearchScript.Factory factory = context.getScriptService().compile(script, SearchScript.CONTEXT); + SearchScript.Factory factory = context.getScriptService().compile(script, SearchScript.SCRIPT_SCORE_CONTEXT); SearchScript.LeafFactory searchScript = factory.newFactory(script.getParams(), context.lookup()); return new ScriptScoreFunction(script, searchScript); } catch (Exception e) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java index 9444563d1c6f1..b865bd3a42029 100644 --- 
a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java @@ -19,6 +19,7 @@ package org.elasticsearch.rest.action.admin.indices; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.support.ActiveShardCount; @@ -47,6 +48,8 @@ public abstract class RestResizeHandler extends BaseRestHandler { public final RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final ResizeRequest resizeRequest = new ResizeRequest(request.param("target"), request.param("index")); resizeRequest.setResizeType(getResizeType()); + // copy_settings should be removed in Elasticsearch 8.0.0; cf. https://github.com/elastic/elasticsearch/issues/28347 + assert Version.CURRENT.major < 8; final String rawCopySettings = request.param("copy_settings"); final Boolean copySettings; if (rawCopySettings == null) { diff --git a/server/src/main/java/org/elasticsearch/script/ScriptModule.java b/server/src/main/java/org/elasticsearch/script/ScriptModule.java index 5afb6ad28d7ab..583421be8e581 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptModule.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptModule.java @@ -42,6 +42,9 @@ public class ScriptModule { CORE_CONTEXTS = Stream.of( SearchScript.CONTEXT, SearchScript.AGGS_CONTEXT, + SearchScript.SCRIPT_SCORE_CONTEXT, + SearchScript.SCRIPT_SORT_CONTEXT, + SearchScript.TERMS_SET_QUERY_CONTEXT, ExecutableScript.CONTEXT, ExecutableScript.AGGS_CONTEXT, ExecutableScript.UPDATE_CONTEXT, diff --git a/server/src/main/java/org/elasticsearch/script/SearchScript.java b/server/src/main/java/org/elasticsearch/script/SearchScript.java index d0c932a3490d8..e5762adb1bbe9 100644 --- 
a/server/src/main/java/org/elasticsearch/script/SearchScript.java +++ b/server/src/main/java/org/elasticsearch/script/SearchScript.java @@ -158,6 +158,12 @@ public interface Factory { /** The context used to compile {@link SearchScript} factories. */ public static final ScriptContext CONTEXT = new ScriptContext<>("search", Factory.class); - // TODO: remove aggs context when it has its own interface + // TODO: remove these contexts when it has its own interface public static final ScriptContext AGGS_CONTEXT = new ScriptContext<>("aggs", Factory.class); -} \ No newline at end of file + // Can return a double. (For ScriptSortType#NUMBER only, for ScriptSortType#STRING normal CONTEXT should be used) + public static final ScriptContext SCRIPT_SORT_CONTEXT = new ScriptContext<>("sort", Factory.class); + // Can return a float + public static final ScriptContext SCRIPT_SCORE_CONTEXT = new ScriptContext<>("score", Factory.class); + // Can return a long + public static final ScriptContext TERMS_SET_QUERY_CONTEXT = new ScriptContext<>("terms_set", Factory.class); +} diff --git a/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java index 99668515de5b1..6e52160238b33 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java @@ -305,7 +305,7 @@ public static ScriptSortBuilder fromXContent(XContentParser parser, String eleme @Override public SortFieldAndFormat build(QueryShardContext context) throws IOException { - final SearchScript.Factory factory = context.getScriptService().compile(script, SearchScript.CONTEXT); + final SearchScript.Factory factory = context.getScriptService().compile(script, SearchScript.SCRIPT_SORT_CONTEXT); final SearchScript.LeafFactory searchScript = factory.newFactory(script.getParams(), context.lookup()); MultiValueMode valueMode = null; diff --git 
a/server/src/test/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutorTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutorTests.java index 125cb572ea54d..8e69b8093597f 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutorTests.java @@ -154,6 +154,7 @@ public void testAutoQueueSizingWithMin() throws Exception { context.close(); } + @TestLogging("org.elasticsearch.common.util.concurrent:DEBUG") public void testAutoQueueSizingWithMax() throws Exception { ThreadContext context = new ThreadContext(Settings.EMPTY); ResizableBlockingQueue queue = diff --git a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java index 835b5506c6322..108b41d54a08e 100644 --- a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -470,6 +470,7 @@ protected void sendRequest(Connection connection, long requestId, String action, * TODO once checksum verification on snapshotting is implemented this test needs to be fixed or split into several * parts... We should also corrupt files on the actual snapshot and check that we don't restore the corrupted shard. 
*/ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30577") @TestLogging("org.elasticsearch.repositories:TRACE,org.elasticsearch.snapshots:TRACE,org.elasticsearch.index.engine:DEBUG") public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, InterruptedException, IOException { int numDocs = scaledRandomIntBetween(100, 1000); diff --git a/server/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java b/server/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java index 2bf691e6a36cf..842748107d1d1 100644 --- a/server/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java +++ b/server/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java @@ -76,7 +76,7 @@ public String getType() { @Override public T compile(String scriptName, String scriptSource, ScriptContext context, Map params) { assert scriptSource.equals("explainable_script"); - assert context == SearchScript.CONTEXT; + assert context == SearchScript.SCRIPT_SCORE_CONTEXT; SearchScript.Factory factory = (p, lookup) -> new SearchScript.LeafFactory() { @Override public SearchScript newInstance(LeafReaderContext context) throws IOException { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java index b709e32946ec6..6af323f1510e4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java @@ -9,6 +9,7 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.NamedDiff; @@ -467,6 +468,14 @@ 
public static DatafeedState getDatafeedState(String datafeedId, @Nullable Persis } } + public static MlMetadata getMlMetadata(ClusterState state) { + MlMetadata mlMetadata = (state == null) ? null : state.getMetaData().custom(MLMetadataField.TYPE); + if (mlMetadata == null) { + return EMPTY_METADATA; + } + return mlMetadata; + } + public static class JobAlreadyMarkedAsDeletedException extends RuntimeException { } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java index 1ac842e8898bf..4e51d7b6c1e30 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.core.ml.job.persistence; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; /** @@ -47,8 +46,7 @@ public static String resultsWriteAlias(String jobId) { * @return The index name */ public static String getPhysicalIndexFromState(ClusterState state, String jobId) { - MlMetadata meta = state.getMetaData().custom(MLMetadataField.TYPE); - return meta.getJobs().get(jobId).getResultsIndexName(); + return MlMetadata.getMlMetadata(state).getJobs().get(jobId).getResultsIndexName(); } /** diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java index f533fc1aa5a7c..05af1ffee17a4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java @@ -23,7 +23,6 @@ 
import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.XPackField; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MachineLearningFeatureSetUsage; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction; @@ -132,15 +131,7 @@ public Map nativeCodeInfo() { @Override public void usage(ActionListener listener) { ClusterState state = clusterService.state(); - MlMetadata mlMetadata = state.getMetaData().custom(MLMetadataField.TYPE); - - // Handle case when usage is called but MlMetadata has not been installed yet - if (mlMetadata == null) { - listener.onResponse(new MachineLearningFeatureSetUsage(available(), enabled, - Collections.emptyMap(), Collections.emptyMap())); - } else { - new Retriever(client, mlMetadata, available(), enabled()).execute(listener); - } + new Retriever(client, MlMetadata.getMlMetadata(state), available(), enabled()).execute(listener); } public static class Retriever { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java index 87cf03caa9949..37d714d1777da 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java @@ -13,7 +13,6 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.OpenJobAction; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; @@ -90,8 +89,7 @@ public void clusterChanged(ClusterChangedEvent event) { } } else if 
(StartDatafeedAction.TASK_NAME.equals(currentTask.getTaskName())) { String datafeedId = ((StartDatafeedAction.DatafeedParams) currentTask.getParams()).getDatafeedId(); - MlMetadata mlMetadata = event.state().getMetaData().custom(MLMetadataField.TYPE); - DatafeedConfig datafeedConfig = mlMetadata.getDatafeed(datafeedId); + DatafeedConfig datafeedConfig = MlMetadata.getMlMetadata(event.state()).getDatafeed(datafeedId); if (currentAssignment.getExecutorNode() == null) { String msg = "No node found to start datafeed [" + datafeedId +"]. Reasons [" + currentAssignment.getExplanation() + "]"; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java index 5dba8ce943c44..c96a12ffa1047 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java @@ -7,20 +7,13 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.ClusterStateUpdateTask; -import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.ml.MLMetadataField; -import org.elasticsearch.xpack.core.ml.MlMetadata; - -import java.util.concurrent.atomic.AtomicBoolean; class MlInitializationService extends AbstractComponent implements ClusterStateListener { @@ -28,8 +21,6 @@ class MlInitializationService extends AbstractComponent 
implements ClusterStateL private final ClusterService clusterService; private final Client client; - private final AtomicBoolean installMlMetadataCheck = new AtomicBoolean(false); - private volatile MlDailyMaintenanceService mlDailyMaintenanceService; MlInitializationService(Settings settings, ThreadPool threadPool, ClusterService clusterService, Client client) { @@ -48,45 +39,12 @@ public void clusterChanged(ClusterChangedEvent event) { } if (event.localNodeMaster()) { - MetaData metaData = event.state().metaData(); - installMlMetadata(metaData); installDailyMaintenanceService(); } else { uninstallDailyMaintenanceService(); } } - private void installMlMetadata(MetaData metaData) { - if (metaData.custom(MLMetadataField.TYPE) == null) { - if (installMlMetadataCheck.compareAndSet(false, true)) { - threadPool.executor(ThreadPool.Names.GENERIC).execute(() -> - clusterService.submitStateUpdateTask("install-ml-metadata", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - // If the metadata has been added already don't try to update - if (currentState.metaData().custom(MLMetadataField.TYPE) != null) { - return currentState; - } - ClusterState.Builder builder = new ClusterState.Builder(currentState); - MetaData.Builder metadataBuilder = MetaData.builder(currentState.metaData()); - metadataBuilder.putCustom(MLMetadataField.TYPE, MlMetadata.EMPTY_METADATA); - builder.metaData(metadataBuilder.build()); - return builder.build(); - } - - @Override - public void onFailure(String source, Exception e) { - installMlMetadataCheck.set(false); - logger.error("unable to install ml metadata", e); - } - }) - ); - } - } else { - installMlMetadataCheck.set(false); - } - } - private void installDailyMaintenanceService() { if (mlDailyMaintenanceService == null) { mlDailyMaintenanceService = new MlDailyMaintenanceService(clusterService.getClusterName(), threadPool, client); diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java index 7d113a838dd45..fa649e541963d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java @@ -27,7 +27,6 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.CloseJobAction; import org.elasticsearch.xpack.core.ml.action.FinalizeJobExecutionAction; @@ -92,8 +91,7 @@ public TransportCloseJobAction(Settings settings, TransportService transportServ static void resolveAndValidateJobId(CloseJobAction.Request request, ClusterState state, List openJobIds, List closingJobIds) { PersistentTasksCustomMetaData tasksMetaData = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); - MlMetadata maybeNull = state.metaData().custom(MLMetadataField.TYPE); - final MlMetadata mlMetadata = (maybeNull == null) ? 
MlMetadata.EMPTY_METADATA : maybeNull; + final MlMetadata mlMetadata = MlMetadata.getMlMetadata(state); List failedJobs = new ArrayList<>(); @@ -107,7 +105,7 @@ static void resolveAndValidateJobId(CloseJobAction.Request request, ClusterState }; Set expandedJobIds = mlMetadata.expandJobIds(request.getJobId(), request.allowNoJobs()); - expandedJobIds.stream().forEach(jobIdProcessor::accept); + expandedJobIds.forEach(jobIdProcessor::accept); if (request.isForce() == false && failedJobs.size() > 0) { if (expandedJobIds.size() == 1) { throw ExceptionsHelper.conflictStatusException("cannot close job [{}] because it failed, use force close", diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java index 79995af9e62aa..220d97e89ba14 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java @@ -119,8 +119,8 @@ protected DeleteDatafeedAction.Response newResponse(boolean acknowledged) { } @Override - public ClusterState execute(ClusterState currentState) throws Exception { - MlMetadata currentMetadata = currentState.getMetaData().custom(MLMetadataField.TYPE); + public ClusterState execute(ClusterState currentState) { + MlMetadata currentMetadata = MlMetadata.getMlMetadata(currentState); PersistentTasksCustomMetaData persistentTasks = currentState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); MlMetadata newMetadata = new MlMetadata.Builder(currentMetadata) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteFilterAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteFilterAction.java index 19e6ec595042a..e14cd76aa183a 100644 --- 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteFilterAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteFilterAction.java @@ -24,7 +24,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.action.DeleteFilterAction; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetaIndex; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.job.config.Detector; @@ -60,8 +59,7 @@ protected void doExecute(DeleteFilterAction.Request request, ActionListener jobs = currentMlMetadata.getJobs(); + Map jobs = MlMetadata.getMlMetadata(state).getJobs(); List currentlyUsedBy = new ArrayList<>(); for (Job job : jobs.values()) { List detectors = job.getAnalysisConfig().getDetectors(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java index b664487c77df6..90821b302fc5c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java @@ -200,10 +200,9 @@ public void onFailure(Exception e) { void markJobAsDeleting(String jobId, ActionListener listener, boolean force) { clusterService.submitStateUpdateTask("mark-job-as-deleted", new ClusterStateUpdateTask() { @Override - public ClusterState execute(ClusterState currentState) throws Exception { - MlMetadata currentMlMetadata = currentState.metaData().custom(MLMetadataField.TYPE); + public ClusterState execute(ClusterState currentState) { PersistentTasksCustomMetaData tasks = currentState.metaData().custom(PersistentTasksCustomMetaData.TYPE); - MlMetadata.Builder builder = new 
MlMetadata.Builder(currentMlMetadata); + MlMetadata.Builder builder = new MlMetadata.Builder(MlMetadata.getMlMetadata(currentState)); builder.markJobAsDeleted(jobId, tasks, force); return buildNewClusterState(currentState, builder); } @@ -248,11 +247,7 @@ public void onTimeout(TimeValue timeout) { } static boolean jobIsDeletedFromState(String jobId, ClusterState clusterState) { - MlMetadata metadata = clusterState.metaData().custom(MLMetadataField.TYPE); - if (metadata == null) { - return true; - } - return !metadata.getJobs().containsKey(jobId); + return !MlMetadata.getMlMetadata(clusterState).getJobs().containsKey(jobId); } private static ClusterState buildNewClusterState(ClusterState currentState, MlMetadata.Builder builder) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionAction.java index 3f8ca39a0f124..e939c6ef31a2f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionAction.java @@ -56,8 +56,8 @@ protected void masterOperation(FinalizeJobExecutionAction.Request request, Clust logger.debug("finalizing jobs [{}]", jobIdString); clusterService.submitStateUpdateTask(source, new ClusterStateUpdateTask() { @Override - public ClusterState execute(ClusterState currentState) throws Exception { - MlMetadata mlMetadata = currentState.metaData().custom(MLMetadataField.TYPE); + public ClusterState execute(ClusterState currentState) { + MlMetadata mlMetadata = MlMetadata.getMlMetadata(currentState); MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(mlMetadata); Date finishedTime = new Date(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarEventsAction.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarEventsAction.java index d59febf12cd36..c81bb2642236d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarEventsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarEventsAction.java @@ -15,7 +15,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.GetCalendarEventsAction; import org.elasticsearch.xpack.core.ml.action.GetCalendarsAction; @@ -70,7 +69,7 @@ protected void doExecute(GetCalendarEventsAction.Request request, if (request.getJobId() != null) { ClusterState state = clusterService.state(); - MlMetadata currentMlMetadata = state.metaData().custom(MLMetadataField.TYPE); + MlMetadata currentMlMetadata = MlMetadata.getMlMetadata(state); List jobGroups; String requestId = request.getJobId(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsAction.java index c94449e8c3616..b4e3851eda820 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsAction.java @@ -18,7 +18,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.action.GetDatafeedsAction; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.util.QueryPage; import 
org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; @@ -52,10 +51,7 @@ protected void masterOperation(GetDatafeedsAction.Request request, ClusterState ActionListener listener) throws Exception { logger.debug("Get datafeed '{}'", request.getDatafeedId()); - MlMetadata mlMetadata = state.metaData().custom(MLMetadataField.TYPE); - if (mlMetadata == null) { - mlMetadata = MlMetadata.EMPTY_METADATA; - } + MlMetadata mlMetadata = MlMetadata.getMlMetadata(state); Set expandedDatafeedIds = mlMetadata.expandDatafeedIds(request.getDatafeedId(), request.allowNoDatafeeds()); List datafeedConfigs = new ArrayList<>(); for (String expandedDatafeedId : expandedDatafeedIds) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java index 0d5b2b202098b..41c8379c39fb8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java @@ -18,7 +18,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction; import org.elasticsearch.xpack.core.ml.action.util.QueryPage; @@ -56,11 +55,7 @@ protected void masterOperation(GetDatafeedsStatsAction.Request request, ClusterS ActionListener listener) throws Exception { logger.debug("Get stats for datafeed '{}'", request.getDatafeedId()); - MlMetadata mlMetadata = state.metaData().custom(MLMetadataField.TYPE); - if (mlMetadata == null) { - mlMetadata = MlMetadata.EMPTY_METADATA; - } - + MlMetadata mlMetadata = MlMetadata.getMlMetadata(state); Set 
expandedDatafeedIds = mlMetadata.expandDatafeedIds(request.getDatafeedId(), request.allowNoDatafeeds()); PersistentTasksCustomMetaData tasksInProgress = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java index f8f8fffa5b710..78bfe2c7bc6b0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java @@ -23,7 +23,6 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction; import org.elasticsearch.xpack.core.ml.action.util.QueryPage; @@ -69,8 +68,7 @@ public TransportGetJobsStatsAction(Settings settings, TransportService transport @Override protected void doExecute(Task task, GetJobsStatsAction.Request request, ActionListener listener) { - MlMetadata clusterMlMetadata = clusterService.state().metaData().custom(MLMetadataField.TYPE); - MlMetadata mlMetadata = (clusterMlMetadata == null) ? 
MlMetadata.EMPTY_METADATA : clusterMlMetadata; + MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterService.state()); request.setExpandedJobsIds(new ArrayList<>(mlMetadata.expandJobIds(request.getJobId(), request.allowNoJobs()))); ActionListener finalListener = listener; listener = ActionListener.wrap(response -> gatherStatsForClosedJobs(mlMetadata, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java index 5ae84129254e5..5e829c72756a7 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java @@ -49,7 +49,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.XPackField; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetaIndex; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.OpenJobAction; @@ -163,7 +162,7 @@ static PersistentTasksCustomMetaData.Assignment selectLeastLoadedMlNode(String j continue; } - MlMetadata mlMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE); + MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState); Job job = mlMetadata.getJobs().get(jobId); Set compatibleJobTypes = Job.getCompatibleJobTypes(node.getVersion()); if (compatibleJobTypes.contains(job.getJobType()) == false) { @@ -474,8 +473,7 @@ public void onFailure(Exception e) { // Step 3. 
Update established model memory for pre-6.1 jobs that haven't had it set ActionListener missingMappingsListener = ActionListener.wrap( response -> { - MlMetadata mlMetadata = clusterService.state().getMetaData().custom(MLMetadataField.TYPE); - Job job = mlMetadata.getJobs().get(jobParams.getJobId()); + Job job = MlMetadata.getMlMetadata(clusterService.state()).getJobs().get(jobParams.getJobId()); if (job != null) { Version jobVersion = job.getJobVersion(); Long jobEstablishedModelMemory = job.getEstablishedModelMemory(); @@ -650,8 +648,7 @@ public PersistentTasksCustomMetaData.Assignment getAssignment(OpenJobAction.JobP public void validate(OpenJobAction.JobParams params, ClusterState clusterState) { // If we already know that we can't find an ml node because all ml nodes are running at capacity or // simply because there are no ml nodes in the cluster then we fail quickly here: - MlMetadata mlMetadata = clusterState.metaData().custom(MLMetadataField.TYPE); - TransportOpenJobAction.validate(params.getJobId(), mlMetadata); + TransportOpenJobAction.validate(params.getJobId(), MlMetadata.getMlMetadata(clusterState)); PersistentTasksCustomMetaData.Assignment assignment = selectLeastLoadedMlNode(params.getJobId(), clusterState, maxConcurrentJobAllocations, fallbackMaxNumberOfOpenJobs, maxMachineMemoryPercent, logger); if (assignment.getExecutorNode() == null) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java index 2ffb318dc4fb2..9cba0b20c51b9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java @@ -17,7 +17,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import 
org.elasticsearch.xpack.core.ClientHelper; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.PreviewDatafeedAction; import org.elasticsearch.xpack.core.ml.datafeed.ChunkingConfig; @@ -52,7 +51,7 @@ public TransportPreviewDatafeedAction(Settings settings, ThreadPool threadPool, @Override protected void doExecute(PreviewDatafeedAction.Request request, ActionListener listener) { - MlMetadata mlMetadata = clusterService.state().getMetaData().custom(MLMetadataField.TYPE); + MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterService.state()); DatafeedConfig datafeed = mlMetadata.getDatafeed(request.getDatafeedId()); if (datafeed == null) { throw ExceptionsHelper.missingDatafeedException(request.getDatafeedId()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutCalendarAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutCalendarAction.java index 11e8fbf912c5b..1393d663fb251 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutCalendarAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutCalendarAction.java @@ -14,9 +14,7 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; @@ -26,16 +24,12 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.action.PutCalendarAction; -import 
org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetaIndex; -import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.calendars.Calendar; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import java.io.IOException; import java.util.Collections; -import java.util.List; -import java.util.function.Consumer; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; @@ -43,17 +37,15 @@ public class TransportPutCalendarAction extends HandledTransportAction { private final Client client; - private final ClusterService clusterService; @Inject public TransportPutCalendarAction(Settings settings, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - Client client, ClusterService clusterService) { + Client client) { super(settings, PutCalendarAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, PutCalendarAction.Request::new); this.client = client; - this.clusterService = clusterService; } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java index 0c492a0817c98..2b4304a205b13 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java @@ -141,7 +141,7 @@ public ClusterState execute(ClusterState currentState) { } private ClusterState putDatafeed(PutDatafeedAction.Request request, ClusterState clusterState) { - MlMetadata currentMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE); + MlMetadata currentMetadata = MlMetadata.getMlMetadata(clusterState); 
MlMetadata newMetadata = new MlMetadata.Builder(currentMetadata) .putDatafeed(request.getDatafeed(), threadPool.getThreadContext()).build(); return ClusterState.builder(clusterState).metaData( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java index aba52b38ecd27..71afa656a4a69 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java @@ -130,7 +130,7 @@ public void onFailure(Exception e) { }; // Verify data extractor factory can be created, then start persistent task - MlMetadata mlMetadata = state.metaData().custom(MLMetadataField.TYPE); + MlMetadata mlMetadata = MlMetadata.getMlMetadata(state); PersistentTasksCustomMetaData tasks = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); validate(params.getDatafeedId(), mlMetadata, tasks); DatafeedConfig datafeed = mlMetadata.getDatafeed(params.getDatafeedId()); @@ -221,9 +221,8 @@ public PersistentTasksCustomMetaData.Assignment getAssignment(StartDatafeedActio @Override public void validate(StartDatafeedAction.DatafeedParams params, ClusterState clusterState) { - MlMetadata mlMetadata = clusterState.metaData().custom(MLMetadataField.TYPE); PersistentTasksCustomMetaData tasks = clusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); - TransportStartDatafeedAction.validate(params.getDatafeedId(), mlMetadata, tasks); + TransportStartDatafeedAction.validate(params.getDatafeedId(), MlMetadata.getMlMetadata(clusterState), tasks); new DatafeedNodeSelector(clusterState, resolver, params.getDatafeedId()).checkDatafeedTaskCanBeCreated(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java index 5ed923e3b2370..b5f16ff191fe2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java @@ -130,7 +130,7 @@ protected void doExecute(Task task, StopDatafeedAction.Request request, ActionLi new ActionListenerResponseHandler<>(listener, StopDatafeedAction.Response::new)); } } else { - MlMetadata mlMetadata = state.getMetaData().custom(MLMetadataField.TYPE); + MlMetadata mlMetadata = MlMetadata.getMlMetadata(state); PersistentTasksCustomMetaData tasks = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); List startedDatafeeds = new ArrayList<>(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateCalendarJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateCalendarJobAction.java index 5df7e890a8fa4..0524cb28a0c11 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateCalendarJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateCalendarJobAction.java @@ -16,7 +16,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.PutCalendarAction; import org.elasticsearch.xpack.core.ml.action.UpdateCalendarJobAction; @@ -48,8 +47,7 @@ public TransportUpdateCalendarJobAction(Settings settings, ThreadPool threadPool @Override protected void doExecute(UpdateCalendarJobAction.Request request, ActionListener listener) { ClusterState clusterState = clusterService.state(); - MlMetadata 
maybeNullMetaData = clusterState.getMetaData().custom(MLMetadataField.TYPE); - final MlMetadata mlMetadata = maybeNullMetaData == null ? MlMetadata.EMPTY_METADATA : maybeNullMetaData; + final MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState); Set jobIdsToAdd = Strings.tokenizeByCommaToSet(request.getJobIdsToAddExpression()); Set jobIdsToRemove = Strings.tokenizeByCommaToSet(request.getJobIdsToRemoveExpression()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateDatafeedAction.java index e50adfa8275e2..4d752fe294081 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateDatafeedAction.java @@ -63,9 +63,9 @@ protected PutDatafeedAction.Response newResponse(boolean acknowledged) { } @Override - public ClusterState execute(ClusterState currentState) throws Exception { + public ClusterState execute(ClusterState currentState) { DatafeedUpdate update = request.getUpdate(); - MlMetadata currentMetadata = currentState.getMetaData().custom(MLMetadataField.TYPE); + MlMetadata currentMetadata = MlMetadata.getMlMetadata(currentState); PersistentTasksCustomMetaData persistentTasks = currentState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); MlMetadata newMetadata = new MlMetadata.Builder(currentMetadata) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java index 5daf8ce28964e..e27156b512613 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java @@ -20,7 +20,6 @@ import 
org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.CloseJobAction; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; @@ -80,10 +79,7 @@ public DatafeedManager(ThreadPool threadPool, Client client, ClusterService clus public void run(TransportStartDatafeedAction.DatafeedTask task, Consumer taskHandler) { String datafeedId = task.getDatafeedId(); ClusterState state = clusterService.state(); - MlMetadata mlMetadata = state.metaData().custom(MLMetadataField.TYPE); - if (mlMetadata == null) { - mlMetadata = MlMetadata.EMPTY_METADATA; - } + MlMetadata mlMetadata = MlMetadata.getMlMetadata(state); DatafeedConfig datafeed = mlMetadata.getDatafeed(datafeedId); Job job = mlMetadata.getJobs().get(datafeed.getJobId()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java index e52906a605d0d..37f9715d09464 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java @@ -12,7 +12,6 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.job.config.JobState; @@ -33,7 +32,7 @@ public class DatafeedNodeSelector { private final IndexNameExpressionResolver resolver; public DatafeedNodeSelector(ClusterState clusterState, 
IndexNameExpressionResolver resolver, String datafeedId) { - MlMetadata mlMetadata = Objects.requireNonNull(clusterState.metaData().custom(MLMetadataField.TYPE)); + MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState); PersistentTasksCustomMetaData tasks = clusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); this.datafeed = mlMetadata.getDatafeed(datafeedId); this.jobTask = MlMetadata.getJobTask(datafeed.getJobId(), tasks); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java index 0f6a0f44cbf02..2d67e64ec60e7 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java @@ -133,8 +133,7 @@ public Job getJobOrThrowIfUnknown(String jobId) { * @throws ResourceNotFoundException if no job matches {@code jobId} */ public static Job getJobOrThrowIfUnknown(String jobId, ClusterState clusterState) { - MlMetadata mlMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE); - Job job = (mlMetadata == null) ? 
null : mlMetadata.getJobs().get(jobId); + Job job = MlMetadata.getMlMetadata(clusterState).getJobs().get(jobId); if (job == null) { throw ExceptionsHelper.missingJobException(jobId); } @@ -142,11 +141,7 @@ public static Job getJobOrThrowIfUnknown(String jobId, ClusterState clusterState } private Set expandJobIds(String expression, boolean allowNoJobs, ClusterState clusterState) { - MlMetadata mlMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE); - if (mlMetadata == null) { - mlMetadata = MlMetadata.EMPTY_METADATA; - } - return mlMetadata.expandJobIds(expression, allowNoJobs); + return MlMetadata.getMlMetadata(clusterState).expandJobIds(expression, allowNoJobs); } /** @@ -160,7 +155,7 @@ private Set expandJobIds(String expression, boolean allowNoJobs, Cluster */ public QueryPage expandJobs(String expression, boolean allowNoJobs, ClusterState clusterState) { Set expandedJobIds = expandJobIds(expression, allowNoJobs, clusterState); - MlMetadata mlMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE); + MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState); List jobs = new ArrayList<>(); for (String expandedJobId : expandedJobIds) { jobs.add(mlMetadata.getJobs().get(expandedJobId)); @@ -188,8 +183,8 @@ public void putJob(PutJobAction.Request request, AnalysisRegistry analysisRegist DEPRECATION_LOGGER.deprecated("Creating jobs with delimited data format is deprecated. 
Please use xcontent instead."); } - MlMetadata currentMlMetadata = state.metaData().custom(MLMetadataField.TYPE); - if (currentMlMetadata != null && currentMlMetadata.getJobs().containsKey(job.getId())) { + MlMetadata currentMlMetadata = MlMetadata.getMlMetadata(state); + if (currentMlMetadata.getJobs().containsKey(job.getId())) { actionListener.onFailure(ExceptionsHelper.jobAlreadyExists(job.getId())); return; } @@ -469,8 +464,8 @@ protected Boolean newResponse(boolean acknowledged) { } @Override - public ClusterState execute(ClusterState currentState) throws Exception { - MlMetadata currentMlMetadata = currentState.metaData().custom(MLMetadataField.TYPE); + public ClusterState execute(ClusterState currentState) { + MlMetadata currentMlMetadata = MlMetadata.getMlMetadata(currentState); if (currentMlMetadata.getJobs().containsKey(jobId) == false) { // We wouldn't have got here if the job never existed so // the Job must have been deleted by another action. @@ -560,8 +555,7 @@ public ClusterState execute(ClusterState currentState) { } private static MlMetadata.Builder createMlMetadataBuilder(ClusterState currentState) { - MlMetadata currentMlMetadata = currentState.metaData().custom(MLMetadataField.TYPE); - return new MlMetadata.Builder(currentMlMetadata); + return new MlMetadata.Builder(MlMetadata.getMlMetadata(currentState)); } private static ClusterState buildNewClusterState(ClusterState currentState, MlMetadata.Builder builder) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java index aa003d29559f0..8364e015a3456 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java @@ -11,7 +11,6 @@ import 
org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.results.Result; @@ -61,12 +60,8 @@ private void removeData(Iterator jobIterator, ActionListener liste } private Iterator newJobIterator() { - List jobs = new ArrayList<>(); ClusterState clusterState = clusterService.state(); - MlMetadata mlMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE); - if (mlMetadata != null) { - jobs.addAll(mlMetadata.getJobs().values()); - } + List jobs = new ArrayList<>(MlMetadata.getMlMetadata(clusterState).getJobs().values()); return createVolatileCursorIterator(jobs); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/UnusedStateRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/UnusedStateRemover.java index b07b025e09e56..fd4085d202041 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/UnusedStateRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/UnusedStateRemover.java @@ -11,10 +11,8 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; @@ -24,7 +22,6 @@ import 
org.elasticsearch.xpack.ml.job.persistence.BatchedStateDocIdsIterator; import java.util.Arrays; -import java.util.Collections; import java.util.Deque; import java.util.List; import java.util.Objects; @@ -84,12 +81,7 @@ private BulkRequestBuilder findUnusedStateDocs() { } private Set getJobIds() { - ClusterState clusterState = clusterService.state(); - MlMetadata mlMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE); - if (mlMetadata != null) { - return mlMetadata.getJobs().keySet(); - } - return Collections.emptySet(); + return MlMetadata.getMlMetadata(clusterService.state()).getJobs().keySet(); } private void executeDeleteUnusedStateDocs(BulkRequestBuilder deleteUnusedStateRequestBuilder, ActionListener listener) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java index b685e6b961d3c..eba2054054c0d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java @@ -65,7 +65,7 @@ public class MachineLearningFeatureSetTests extends ESTestCase { private XPackLicenseState licenseState; @Before - public void init() throws Exception { + public void init() { commonSettings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath()) .put(MachineLearningField.AUTODETECT_PROCESS.getKey(), false) @@ -232,9 +232,28 @@ public void testUsageGivenMlMetadataNotInstalled() throws Exception { try (XContentBuilder builder = XContentFactory.jsonBuilder()) { usage.toXContent(builder, ToXContent.EMPTY_PARAMS); source = new XContentSource(builder); - assertThat(source.getValue("jobs"), equalTo(Collections.emptyMap())); - assertThat(source.getValue("datafeeds"), equalTo(Collections.emptyMap())); } + + 
assertThat(source.getValue("jobs._all.count"), equalTo(0)); + assertThat(source.getValue("jobs._all.detectors.min"), equalTo(0.0)); + assertThat(source.getValue("jobs._all.detectors.max"), equalTo(0.0)); + assertThat(source.getValue("jobs._all.detectors.total"), equalTo(0.0)); + assertThat(source.getValue("jobs._all.detectors.avg"), equalTo(0.0)); + assertThat(source.getValue("jobs._all.model_size.min"), equalTo(0.0)); + assertThat(source.getValue("jobs._all.model_size.max"), equalTo(0.0)); + assertThat(source.getValue("jobs._all.model_size.total"), equalTo(0.0)); + assertThat(source.getValue("jobs._all.model_size.avg"), equalTo(0.0)); + + assertThat(source.getValue("jobs.opening"), is(nullValue())); + assertThat(source.getValue("jobs.opened"), is(nullValue())); + assertThat(source.getValue("jobs.closing"), is(nullValue())); + assertThat(source.getValue("jobs.closed"), is(nullValue())); + assertThat(source.getValue("jobs.failed"), is(nullValue())); + + assertThat(source.getValue("datafeeds._all.count"), equalTo(0)); + + assertThat(source.getValue("datafeeds.started"), is(nullValue())); + assertThat(source.getValue("datafeeds.stopped"), is(nullValue())); } private void givenJobs(List jobs, List jobsStats) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlInitializationServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlInitializationServiceTests.java index ce46139a18bdb..d324d17f40e25 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlInitializationServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlInitializationServiceTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import 
org.elasticsearch.cluster.node.DiscoveryNodes; @@ -22,20 +21,15 @@ import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.junit.Before; -import org.mockito.Mockito; import java.net.InetAddress; import java.util.concurrent.ExecutorService; import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.mock.orig.Mockito.doAnswer; -import static org.elasticsearch.mock.orig.Mockito.times; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; import static org.mockito.Matchers.any; -import static org.mockito.Matchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -68,7 +62,7 @@ public void setUpMocks() { when(clusterService.getClusterName()).thenReturn(CLUSTER_NAME); } - public void testInitialize() throws Exception { + public void testInitialize() { MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client); ClusterState cs = ClusterState.builder(new ClusterName("_name")) @@ -80,11 +74,10 @@ public void testInitialize() throws Exception { .build(); initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs)); - verify(clusterService, times(1)).submitStateUpdateTask(eq("install-ml-metadata"), any()); assertThat(initializationService.getDailyMaintenanceService().isStarted(), is(true)); } - public void testInitialize_noMasterNode() throws Exception { + public void testInitialize_noMasterNode() { MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client); ClusterState cs = ClusterState.builder(new ClusterName("_name")) @@ -94,11 +87,10 @@ public void testInitialize_noMasterNode() throws Exception { .build(); 
initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs)); - verify(clusterService, times(0)).submitStateUpdateTask(eq("install-ml-metadata"), any()); assertThat(initializationService.getDailyMaintenanceService(), is(nullValue())); } - public void testInitialize_alreadyInitialized() throws Exception { + public void testInitialize_alreadyInitialized() { MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client); ClusterState cs = ClusterState.builder(new ClusterName("_name")) @@ -113,67 +105,10 @@ public void testInitialize_alreadyInitialized() throws Exception { initializationService.setDailyMaintenanceService(initialDailyMaintenanceService); initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs)); - verify(clusterService, times(0)).submitStateUpdateTask(eq("install-ml-metadata"), any()); assertSame(initialDailyMaintenanceService, initializationService.getDailyMaintenanceService()); } - public void testInitialize_onlyOnce() throws Exception { - MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client); - - ClusterState cs = ClusterState.builder(new ClusterName("_name")) - .nodes(DiscoveryNodes.builder() - .add(new DiscoveryNode("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9200), Version.CURRENT)) - .localNodeId("_node_id") - .masterNodeId("_node_id")) - .metaData(MetaData.builder()) - .build(); - initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs)); - initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs)); - - verify(clusterService, times(1)).submitStateUpdateTask(eq("install-ml-metadata"), any()); - } - - public void testInitialize_reintialiseAfterFailure() throws Exception { - MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client); - 
- // Fail the first cluster state update - AtomicBoolean onFailureCalled = new AtomicBoolean(false); - Mockito.doAnswer(invocation -> { - ClusterStateUpdateTask task = (ClusterStateUpdateTask) invocation.getArguments()[1]; - task.onFailure("mock a failure", new IllegalStateException()); - onFailureCalled.set(true); - return null; - }).when(clusterService).submitStateUpdateTask(eq("install-ml-metadata"), any(ClusterStateUpdateTask.class)); - - ClusterState cs = ClusterState.builder(new ClusterName("_name")) - .nodes(DiscoveryNodes.builder() - .add(new DiscoveryNode("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9200), Version.CURRENT)) - .localNodeId("_node_id") - .masterNodeId("_node_id")) - .metaData(MetaData.builder()) - .build(); - initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs)); - assertTrue("Something went wrong mocking the cluster update task", onFailureCalled.get()); - verify(clusterService, times(1)).submitStateUpdateTask(eq("install-ml-metadata"), any(ClusterStateUpdateTask.class)); - - // 2nd update succeeds - AtomicReference clusterStateHolder = new AtomicReference<>(); - Mockito.doAnswer(invocation -> { - ClusterStateUpdateTask task = (ClusterStateUpdateTask) invocation.getArguments()[1]; - clusterStateHolder.set(task.execute(cs)); - return null; - }).when(clusterService).submitStateUpdateTask(eq("install-ml-metadata"), any(ClusterStateUpdateTask.class)); - - initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs)); - assertTrue("Something went wrong mocking the sucessful cluster update task", clusterStateHolder.get() != null); - verify(clusterService, times(2)).submitStateUpdateTask(eq("install-ml-metadata"), any(ClusterStateUpdateTask.class)); - - // 3rd update won't be called as ML Metadata has been installed - initializationService.clusterChanged(new ClusterChangedEvent("_source", clusterStateHolder.get(), clusterStateHolder.get())); - verify(clusterService, 
times(2)).submitStateUpdateTask(eq("install-ml-metadata"), any(ClusterStateUpdateTask.class)); - } - - public void testNodeGoesFromMasterToNonMasterAndBack() throws Exception { + public void testNodeGoesFromMasterToNonMasterAndBack() { MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client); MlDailyMaintenanceService initialDailyMaintenanceService = mock(MlDailyMaintenanceService.class); initializationService.setDailyMaintenanceService(initialDailyMaintenanceService); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java index a5d3faf323434..bd722ebf8ef9a 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java @@ -251,11 +251,11 @@ public void testStart_GivenNewlyCreatedJobLoopBackAndRealtime() throws Exception } public void testDatafeedTaskWaitsUntilJobIsOpened() { - PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask("job_id", "node_id", JobState.OPENING, tasksBuilder); ClusterState.Builder cs = ClusterState.builder(clusterService.state()) - .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, clusterService.state().getMetaData() - .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); + .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state())) + .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); when(clusterService.state()).thenReturn(cs.build()); Consumer handler = mockConsumer(); @@ -269,8 +269,8 @@ public void 
testDatafeedTaskWaitsUntilJobIsOpened() { addJobTask("job_id", "node_id", JobState.OPENING, tasksBuilder); addJobTask("another_job", "node_id", JobState.OPENED, tasksBuilder); ClusterState.Builder anotherJobCs = ClusterState.builder(clusterService.state()) - .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, clusterService.state().getMetaData() - .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); + .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state())) + .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); capturedClusterStateListener.getValue().clusterChanged(new ClusterChangedEvent("_source", anotherJobCs.build(), cs.build())); @@ -280,8 +280,8 @@ public void testDatafeedTaskWaitsUntilJobIsOpened() { tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask("job_id", "node_id", JobState.OPENED, tasksBuilder); ClusterState.Builder jobOpenedCs = ClusterState.builder(clusterService.state()) - .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, clusterService.state().getMetaData() - .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); + .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state())) + .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); capturedClusterStateListener.getValue().clusterChanged( new ClusterChangedEvent("_source", jobOpenedCs.build(), anotherJobCs.build())); @@ -294,8 +294,8 @@ public void testDatafeedTaskStopsBecauseJobFailedWhileOpening() { PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask("job_id", "node_id", JobState.OPENING, tasksBuilder); ClusterState.Builder cs = ClusterState.builder(clusterService.state()) - .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, 
clusterService.state().getMetaData() - .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); + .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state())) + .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); when(clusterService.state()).thenReturn(cs.build()); Consumer handler = mockConsumer(); @@ -308,8 +308,8 @@ public void testDatafeedTaskStopsBecauseJobFailedWhileOpening() { tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask("job_id", "node_id", JobState.FAILED, tasksBuilder); ClusterState.Builder updatedCs = ClusterState.builder(clusterService.state()) - .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, clusterService.state().getMetaData() - .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); + .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state())) + .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); capturedClusterStateListener.getValue().clusterChanged(new ClusterChangedEvent("_source", updatedCs.build(), cs.build())); @@ -322,8 +322,8 @@ public void testDatafeedGetsStoppedWhileWaitingForJobToOpen() { PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask("job_id", "node_id", JobState.OPENING, tasksBuilder); ClusterState.Builder cs = ClusterState.builder(clusterService.state()) - .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, clusterService.state().getMetaData() - .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); + .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state())) + .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); when(clusterService.state()).thenReturn(cs.build()); Consumer 
handler = mockConsumer(); @@ -340,8 +340,8 @@ public void testDatafeedGetsStoppedWhileWaitingForJobToOpen() { tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask("job_id", "node_id", JobState.OPENED, tasksBuilder); ClusterState.Builder updatedCs = ClusterState.builder(clusterService.state()) - .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, clusterService.state().getMetaData() - .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); + .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state())) + .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); capturedClusterStateListener.getValue().clusterChanged(new ClusterChangedEvent("_source", cs.build(), updatedCs.build())); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteJobIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteJobIT.java index 778ffe6dfae2d..357c2bc232552 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteJobIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteJobIT.java @@ -103,7 +103,7 @@ public void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) { } private ClusterState markJobAsDeleted(String jobId, ClusterState currentState) { - MlMetadata mlMetadata = currentState.metaData().custom(MLMetadataField.TYPE); + MlMetadata mlMetadata = MlMetadata.getMlMetadata(currentState); assertNotNull(mlMetadata); MlMetadata.Builder builder = new MlMetadata.Builder(mlMetadata); @@ -116,7 +116,7 @@ private ClusterState markJobAsDeleted(String jobId, ClusterState currentState) { } private ClusterState removeJobFromClusterState(String jobId, ClusterState currentState) { - MlMetadata.Builder builder = new MlMetadata.Builder(currentState.metaData().custom(MLMetadataField.TYPE)); + MlMetadata.Builder builder = new 
MlMetadata.Builder(MlMetadata.getMlMetadata(currentState)); builder.deleteJob(jobId, currentState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE)); ClusterState.Builder newState = ClusterState.builder(currentState); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java index 667be5d41ea2b..64b3bfbab45c8 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java @@ -326,7 +326,7 @@ private JobManager createJobManager() { private ClusterState createClusterState() { ClusterState.Builder builder = ClusterState.builder(new ClusterName("_name")); - builder.metaData(MetaData.builder().putCustom(MLMetadataField.TYPE, MlMetadata.EMPTY_METADATA)); + builder.metaData(MetaData.builder()); return builder.build(); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobProviderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobProviderTests.java index 9fea904a99fa1..ef87fe392dd75 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobProviderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobProviderTests.java @@ -39,8 +39,6 @@ import org.elasticsearch.search.SearchHits; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.ml.MLMetadataField; -import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; @@ -93,7 +91,7 @@ public void testCreateJobResultsIndex() { AtomicReference resultHolder = new 
AtomicReference<>(); ClusterState cs = ClusterState.builder(new ClusterName("_name")) - .metaData(MetaData.builder().putCustom(MLMetadataField.TYPE, MlMetadata.EMPTY_METADATA).indices(ImmutableOpenMap.of())) + .metaData(MetaData.builder().indices(ImmutableOpenMap.of())) .build(); ClusterService clusterService = mock(ClusterService.class); @@ -157,7 +155,7 @@ public void testCreateJobWithExistingIndex() { .fPut(AnomalyDetectorsIndex.jobResultsAliasedName("foo"), indexMetaData).build(); ClusterState cs2 = ClusterState.builder(new ClusterName("_name")) - .metaData(MetaData.builder().putCustom(MLMetadataField.TYPE, MlMetadata.EMPTY_METADATA).indices(indexMap)).build(); + .metaData(MetaData.builder().indices(indexMap)).build(); ClusterService clusterService = mock(ClusterService.class); @@ -209,7 +207,7 @@ public void testCreateJobRelatedIndicies_createsAliasBecauseIndexNameIsSet() { ImmutableOpenMap indexMap = ImmutableOpenMap.builder().build(); ClusterState cs = ClusterState.builder(new ClusterName("_name")) - .metaData(MetaData.builder().putCustom(MLMetadataField.TYPE, MlMetadata.EMPTY_METADATA).indices(indexMap)).build(); + .metaData(MetaData.builder().indices(indexMap)).build(); ClusterService clusterService = mock(ClusterService.class); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java index 9f3d1c779f8eb..f330c501b76f9 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java @@ -28,7 +28,6 @@ import org.elasticsearch.test.discovery.TestZenDiscovery; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.ml.LocalStateMachineLearning; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.ml.MachineLearning; import 
org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.core.ml.MlMetadata; @@ -272,8 +271,7 @@ public static GetDatafeedsStatsAction.Response.DatafeedStats getDatafeedStats(St } public static void deleteAllDatafeeds(Logger logger, Client client) throws Exception { - MetaData metaData = client.admin().cluster().prepareState().get().getState().getMetaData(); - MlMetadata mlMetadata = metaData.custom(MLMetadataField.TYPE); + MlMetadata mlMetadata = MlMetadata.getMlMetadata(client.admin().cluster().prepareState().get().getState()); try { logger.info("Closing all datafeeds (using _all)"); StopDatafeedAction.Response stopResponse = client @@ -312,8 +310,7 @@ public static void deleteAllDatafeeds(Logger logger, Client client) throws Excep } public static void deleteAllJobs(Logger logger, Client client) throws Exception { - MetaData metaData = client.admin().cluster().prepareState().get().getState().getMetaData(); - MlMetadata mlMetadata = metaData.custom(MLMetadataField.TYPE); + MlMetadata mlMetadata = MlMetadata.getMlMetadata(client.admin().cluster().prepareState().get().getState()); try { CloseJobAction.Request closeRequest = new CloseJobAction.Request(MetaData.ALL); diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-certgen b/x-pack/plugin/security/src/main/bin/elasticsearch-certgen index d56ae2f4d1adb..67d5168452031 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-certgen +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-certgen @@ -4,17 +4,7 @@ # or more contributor license agreements. Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. 
-source "`dirname "$0"`"/elasticsearch-env - -source "`dirname "$0"`"/x-pack-security-env - -exec \ - "$JAVA" \ - $ES_JAVA_OPTS \ - -Des.path.home="$ES_HOME" \ - -Des.path.conf="$ES_PATH_CONF" \ - -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ - -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ - -cp "$ES_CLASSPATH" \ +ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ + "`dirname "$0"`"/elasticsearch-cli \ org.elasticsearch.xpack.core.ssl.CertificateGenerateTool \ "$@" diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-certutil b/x-pack/plugin/security/src/main/bin/elasticsearch-certutil index c2502bd734ffe..eb245fd0b0ee1 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-certutil +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-certutil @@ -4,17 +4,7 @@ # or more contributor license agreements. Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. -source "`dirname "$0"`"/elasticsearch-env - -source "`dirname "$0"`"/x-pack-security-env - -exec \ - "$JAVA" \ - $ES_JAVA_OPTS \ - -Des.path.home="$ES_HOME" \ - -Des.path.conf="$ES_PATH_CONF" \ - -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ - -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ - -cp "$ES_CLASSPATH" \ +ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ + "`dirname "$0"`"/elasticsearch-cli \ org.elasticsearch.xpack.core.ssl.CertificateTool \ "$@" diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-migrate b/x-pack/plugin/security/src/main/bin/elasticsearch-migrate index eb3c81febdfb7..dc3f360361d9b 100755 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-migrate +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-migrate @@ -4,17 +4,7 @@ # or more contributor license agreements. Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. 
-source "`dirname "$0"`"/elasticsearch-env - -source "`dirname "$0"`"/x-pack-security-env - -exec \ - "$JAVA" \ - $ES_JAVA_OPTS \ - -Des.path.home="$ES_HOME" \ - -Des.path.conf="$ES_PATH_CONF" \ - -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ - -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ - -cp "$ES_CLASSPATH" \ +ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ + "`dirname "$0"`"/elasticsearch-cli \ org.elasticsearch.xpack.security.authc.esnative.ESNativeRealmMigrateTool \ - "$@" + "$@" diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata b/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata index 92200d82e1264..48274ab7efa42 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata @@ -4,17 +4,7 @@ # or more contributor license agreements. Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. -source "`dirname "$0"`"/elasticsearch-env - -source "`dirname "$0"`"/x-pack-security-env - -exec \ - "$JAVA" \ - $ES_JAVA_OPTS \ - -Des.path.home="$ES_HOME" \ - -Des.path.conf="$ES_PATH_CONF" \ - -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ - -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ - -cp "$ES_CLASSPATH" \ +ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ + "`dirname "$0"`"/elasticsearch-cli \ org.elasticsearch.xpack.security.authc.saml.SamlMetadataCommand \ "$@" diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords b/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords index e6aaa00d64796..d896efcfcbe28 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords @@ -4,17 +4,7 @@ # or more contributor license agreements. 
Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. -source "`dirname "$0"`"/elasticsearch-env - -source "`dirname "$0"`"/x-pack-security-env - -exec \ - "$JAVA" \ - $ES_JAVA_OPTS \ - -Des.path.home="$ES_HOME" \ - -Des.path.conf="$ES_PATH_CONF" \ - -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ - -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ - -cp "$ES_CLASSPATH" \ +ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ + "`dirname "$0"`"/elasticsearch-cli \ org.elasticsearch.xpack.security.authc.esnative.tool.SetupPasswordTool \ "$@" diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen b/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen index e8c4f10c044c3..954b0884007ff 100755 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen @@ -4,17 +4,7 @@ # or more contributor license agreements. Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. -source "`dirname "$0"`"/elasticsearch-env - -source "`dirname "$0"`"/x-pack-security-env - -exec \ - "$JAVA" \ - $ES_JAVA_OPTS \ - -Des.path.home="$ES_HOME" \ - -Des.path.conf="$ES_PATH_CONF" \ - -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ - -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ - -cp "$ES_CLASSPATH" \ +ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ + "`dirname "$0"`"/elasticsearch-cli \ org.elasticsearch.xpack.security.crypto.tool.SystemKeyTool \ "$@" diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-users b/x-pack/plugin/security/src/main/bin/elasticsearch-users index 2d9ed8df93dc4..6caeece8cbccd 100755 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-users +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-users @@ -4,17 +4,7 @@ # or more contributor license agreements. 
Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. -source "`dirname "$0"`"/elasticsearch-env - -source "`dirname "$0"`"/x-pack-security-env - -exec \ - "$JAVA" \ - $ES_JAVA_OPTS \ - -Des.path.home="$ES_HOME" \ - -Des.path.conf="$ES_PATH_CONF" \ - -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ - -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ - -cp "$ES_CLASSPATH" \ +ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ + "`dirname "$0"`"/elasticsearch-cli \ org.elasticsearch.xpack.security.authc.file.tool.UsersTool \ "$@" diff --git a/x-pack/plugin/security/src/main/bin/x-pack-security-env b/x-pack/plugin/security/src/main/bin/x-pack-security-env index 3a2b15e13fa4a..7d8010deec5ea 100644 --- a/x-pack/plugin/security/src/main/bin/x-pack-security-env +++ b/x-pack/plugin/security/src/main/bin/x-pack-security-env @@ -4,7 +4,5 @@ # or more contributor license agreements. Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. 
-source "`dirname "$0"`"/x-pack-env - # include x-pack-security jars in classpath ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/modules/x-pack-security/*" diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java index f4f98a4e4ffd7..bac4fdcfb2a63 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java @@ -5,8 +5,6 @@ */ package org.elasticsearch.xpack.sql.execution.search; -import org.elasticsearch.index.query.BoolQueryBuilder; -import org.elasticsearch.index.query.ConstantScoreQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; @@ -28,6 +26,7 @@ import java.util.List; import static java.util.Collections.singletonList; +import static org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.search.sort.SortBuilders.fieldSort; import static org.elasticsearch.search.sort.SortBuilders.scoreSort; import static org.elasticsearch.search.sort.SortBuilders.scriptSort; @@ -37,20 +36,23 @@ public abstract class SourceGenerator { private static final List NO_STORED_FIELD = singletonList(StoredFieldsContext._NONE_); public static SearchSourceBuilder sourceBuilder(QueryContainer container, QueryBuilder filter, Integer size) { - final SearchSourceBuilder source = new SearchSourceBuilder(); + QueryBuilder finalQuery = null; // add the source - if (container.query() == null) { + if (container.query() != null) { if (filter != null) { - source.query(new ConstantScoreQueryBuilder(filter)); + finalQuery = boolQuery().must(container.query().asBuilder()).filter(filter); + } else { + finalQuery 
= container.query().asBuilder(); } } else { if (filter != null) { - source.query(new BoolQueryBuilder().must(container.query().asBuilder()).filter(filter)); - } else { - source.query(container.query().asBuilder()); + finalQuery = boolQuery().filter(filter); } } + final SearchSourceBuilder source = new SearchSourceBuilder(); + source.query(finalQuery); + SqlSourceBuilder sortBuilder = new SqlSourceBuilder(); // Iterate through all the columns requested, collecting the fields that // need to be retrieved from the result documents diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/BoolQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/BoolQuery.java index 64949fe318ce6..47075773b0166 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/BoolQuery.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/BoolQuery.java @@ -5,13 +5,13 @@ */ package org.elasticsearch.xpack.sql.querydsl.query; -import java.util.Objects; - import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.sort.NestedSortBuilder; import org.elasticsearch.xpack.sql.tree.Location; +import java.util.Objects; + import static org.elasticsearch.index.query.QueryBuilders.boolQuery; /** @@ -63,9 +63,8 @@ public void enrichNestedSort(NestedSortBuilder sort) { public QueryBuilder asBuilder() { BoolQueryBuilder boolQuery = boolQuery(); if (isAnd) { - // TODO are we throwing out score by using filter? 
- boolQuery.filter(left.asBuilder()); - boolQuery.filter(right.asBuilder()); + boolQuery.must(left.asBuilder()); + boolQuery.must(right.asBuilder()); } else { boolQuery.should(left.asBuilder()); boolQuery.should(right.asBuilder()); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/SourceGeneratorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/SourceGeneratorTests.java index f038a20823dbc..816b665133583 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/SourceGeneratorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/SourceGeneratorTests.java @@ -5,9 +5,6 @@ */ package org.elasticsearch.xpack.sql.execution.search; -import org.elasticsearch.index.query.BoolQueryBuilder; -import org.elasticsearch.index.query.ConstantScoreQueryBuilder; -import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.Operator; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; @@ -28,6 +25,8 @@ import org.elasticsearch.xpack.sql.type.KeywordEsField; import static java.util.Collections.singletonList; +import static org.elasticsearch.index.query.QueryBuilders.boolQuery; +import static org.elasticsearch.index.query.QueryBuilders.matchQuery; import static org.elasticsearch.search.sort.SortBuilders.fieldSort; import static org.elasticsearch.search.sort.SortBuilders.scoreSort; @@ -42,22 +41,22 @@ public void testNoQueryNoFilter() { public void testQueryNoFilter() { QueryContainer container = new QueryContainer().with(new MatchQuery(Location.EMPTY, "foo", "bar")); SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(container, null, randomIntBetween(1, 10)); - assertEquals(new MatchQueryBuilder("foo", "bar").operator(Operator.OR), sourceBuilder.query()); + assertEquals(matchQuery("foo", "bar").operator(Operator.OR), 
sourceBuilder.query()); } public void testNoQueryFilter() { QueryContainer container = new QueryContainer(); - QueryBuilder filter = new MatchQueryBuilder("bar", "baz"); + QueryBuilder filter = matchQuery("bar", "baz"); SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(container, filter, randomIntBetween(1, 10)); - assertEquals(new ConstantScoreQueryBuilder(new MatchQueryBuilder("bar", "baz")), sourceBuilder.query()); + assertEquals(boolQuery().filter(matchQuery("bar", "baz")), sourceBuilder.query()); } public void testQueryFilter() { QueryContainer container = new QueryContainer().with(new MatchQuery(Location.EMPTY, "foo", "bar")); - QueryBuilder filter = new MatchQueryBuilder("bar", "baz"); + QueryBuilder filter = matchQuery("bar", "baz"); SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(container, filter, randomIntBetween(1, 10)); - assertEquals(new BoolQueryBuilder().must(new MatchQueryBuilder("foo", "bar").operator(Operator.OR)) - .filter(new MatchQueryBuilder("bar", "baz")), sourceBuilder.query()); + assertEquals(boolQuery().must(matchQuery("foo", "bar").operator(Operator.OR)).filter(matchQuery("bar", "baz")), + sourceBuilder.query()); } public void testLimit() { diff --git a/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval b/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval index 6de537660cbc4..c7185d167562c 100755 --- a/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval +++ b/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval @@ -4,16 +4,7 @@ # or more contributor license agreements. Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. 
-source "`dirname "$0"`"/elasticsearch-env - -source "`dirname "$0"`"/x-pack-watcher-env - -exec \ - "$JAVA" \ - $ES_JAVA_OPTS \ - -Des.path.home="$ES_HOME" \ - -Des.path.conf="$ES_PATH_CONF" \ - -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ - -cp "$ES_CLASSPATH" \ +ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-watcher-env" \ + "`dirname "$0"`"/elasticsearch-cli \ org.elasticsearch.xpack.watcher.trigger.schedule.tool.CronEvalTool \ "$@" diff --git a/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env b/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env index 13718a01b4330..83045297a0c68 100644 --- a/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env +++ b/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env @@ -4,7 +4,5 @@ # or more contributor license agreements. Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. -source "`dirname "$0"`"/x-pack-env - # include x-pack-security jars in classpath ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/modules/x-pack-watcher/*" diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java index 3019a00351c28..e0cf0efac472e 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java @@ -16,7 +16,6 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.CheckedSupplier; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.xcontent.XContentHelper; @@ -33,12 +32,11 @@ import java.sql.JDBCType; import java.util.Arrays; import java.util.HashMap; +import java.util.List; import java.util.Locale; import java.util.Map; -import java.util.TreeMap; 
import static java.util.Collections.emptyList; -import static java.util.Collections.emptyMap; import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; import static java.util.Collections.unmodifiableMap; @@ -396,19 +394,23 @@ public void testBasicTranslateQueryWithFilter() throws IOException { assertNotNull(query); @SuppressWarnings("unchecked") - Map constantScore = (Map) query.get("constant_score"); - assertNotNull(constantScore); + Map bool = (Map) query.get("bool"); + assertNotNull(bool); @SuppressWarnings("unchecked") - Map filter = (Map) constantScore.get("filter"); + List filter = (List) bool.get("filter"); assertNotNull(filter); @SuppressWarnings("unchecked") - Map match = (Map) filter.get("match"); - assertNotNull(match); + Map map = (Map) filter.get(0); + assertNotNull(map); @SuppressWarnings("unchecked") - Map matchQuery = (Map) match.get("test"); + Map matchQ = (Map) map.get("match"); + + @SuppressWarnings("unchecked") + Map matchQuery = (Map) matchQ.get("test"); + assertNotNull(matchQuery); assertEquals("foo", matchQuery.get("query")); }