diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 7e9ccef2d2120..ffbeed8e6e9a7 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -528,11 +528,12 @@ class BuildPlugin implements Plugin { project.tasks.withType(GenerateMavenPom.class) { GenerateMavenPom generatePOMTask -> // The GenerateMavenPom task is aggressive about setting the destination, instead of fighting it, // just make a copy. + generatePOMTask.ext.pomFileName = "${project.archivesBaseName}-${project.version}.pom" doLast { project.copy { from generatePOMTask.destination into "${project.buildDir}/distributions" - rename { "${project.archivesBaseName}-${project.version}.pom" } + rename { generatePOMTask.ext.pomFileName } } } // build poms with assemble (if the assemble task exists) @@ -798,6 +799,8 @@ class BuildPlugin implements Plugin { systemProperty 'tests.task', path systemProperty 'tests.security.manager', 'true' systemProperty 'jna.nosys', 'true' + // TODO: remove this deprecation compatibility setting for 7.0 + systemProperty 'es.aggregations.enable_scripted_metric_agg_param', 'false' systemProperty 'es.scripting.exception_for_missing_value', 'true' systemProperty 'compiler.java', project.ext.compilerJavaVersion.getMajorVersion() if (project.ext.inFipsJvm) { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index b44f24064921a..778ff091870ed 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -19,23 +19,19 @@ package org.elasticsearch.gradle.plugin import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin -import nebula.plugin.info.scm.ScmInfoPlugin +import nebula.plugin.publishing.maven.MavenScmPlugin import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.NoticeTask import org.elasticsearch.gradle.test.RestIntegTestTask import org.elasticsearch.gradle.test.RunTask -import org.gradle.api.InvalidUserDataException import org.gradle.api.Project -import org.gradle.api.Task -import org.gradle.api.XmlProvider import org.gradle.api.publish.maven.MavenPublication import org.gradle.api.publish.maven.plugins.MavenPublishPlugin +import org.gradle.api.publish.maven.tasks.GenerateMavenPom import org.gradle.api.tasks.SourceSet import org.gradle.api.tasks.bundling.Zip +import org.gradle.jvm.tasks.Jar -import java.nio.file.Files -import java.nio.file.Path -import java.nio.file.StandardCopyOption import java.util.regex.Matcher import java.util.regex.Pattern /** @@ -55,16 +51,10 @@ public class PluginBuildPlugin extends BuildPlugin { String name = project.pluginProperties.extension.name project.archivesBaseName = name - if (project.pluginProperties.extension.hasClientJar) { - // for plugins which work with the transport client, we copy the jar - // file to a new name, copy the nebula generated pom to the same name, - // and generate a different pom for the zip - addClientJarPomGeneration(project) - addClientJarTask(project) - } - // while the jar isn't normally published, we still at least build a pom of deps - // in case it is published, for instance when other plugins extend this plugin - configureJarPom(project) + // set the project 
description so it will be picked up by publishing + project.description = project.pluginProperties.extension.description + + configurePublishing(project) project.integTestCluster.dependsOn(project.bundlePlugin) project.tasks.run.dependsOn(project.bundlePlugin) @@ -92,6 +82,32 @@ public class PluginBuildPlugin extends BuildPlugin { project.tasks.create('run', RunTask) // allow running ES with this plugin in the foreground of a build } + private void configurePublishing(Project project) { + // Only configure publishing if applied externally + if (project.pluginProperties.extension.hasClientJar) { + project.plugins.apply(MavenScmPlugin.class) + // Only change Jar tasks, we don't want a -client zip so we can't change archivesBaseName + project.tasks.withType(Jar) { + baseName = baseName + "-client" + } + // always configure publishing for client jars + project.plugins.apply(MavenScmPlugin.class) + project.publishing.publications.nebula(MavenPublication).artifactId( + project.pluginProperties.extension.name + "-client" + ) + project.tasks.withType(GenerateMavenPom.class) { GenerateMavenPom generatePOMTask -> + generatePOMTask.ext.pomFileName = "${project.archivesBaseName}-client-${project.version}.pom" + } + } else { + project.plugins.withType(MavenPublishPlugin).whenPluginAdded { + project.publishing.publications.nebula(MavenPublication).artifactId( + project.pluginProperties.extension.name + ) + } + + } + } + private static void configureDependencies(Project project) { project.dependencies { compileOnly "org.elasticsearch:elasticsearch:${project.versions.elasticsearch}" @@ -159,33 +175,6 @@ public class PluginBuildPlugin extends BuildPlugin { } /** Adds a task to move jar and associated files to a "-client" name. */ - protected static void addClientJarTask(Project project) { - Task clientJar = project.tasks.create('clientJar') - clientJar.dependsOn(project.jar, project.tasks.generatePomFileForClientJarPublication, project.javadocJar, project.sourcesJar) - clientJar.doFirst { - Path jarFile = project.jar.outputs.files.singleFile.toPath() - String clientFileName = jarFile.fileName.toString().replace(project.version, "client-${project.version}") - Files.copy(jarFile, jarFile.resolveSibling(clientFileName), StandardCopyOption.REPLACE_EXISTING) - - String clientPomFileName = clientFileName.replace('.jar', '.pom') - Files.copy( - project.tasks.generatePomFileForClientJarPublication.outputs.files.singleFile.toPath(), - jarFile.resolveSibling(clientPomFileName), - StandardCopyOption.REPLACE_EXISTING - ) - - String sourcesFileName = jarFile.fileName.toString().replace('.jar', '-sources.jar') - String clientSourcesFileName = clientFileName.replace('.jar', '-sources.jar') - Files.copy(jarFile.resolveSibling(sourcesFileName), jarFile.resolveSibling(clientSourcesFileName), - StandardCopyOption.REPLACE_EXISTING) - - String javadocFileName = jarFile.fileName.toString().replace('.jar', '-javadoc.jar') - String clientJavadocFileName = clientFileName.replace('.jar', '-javadoc.jar') - Files.copy(jarFile.resolveSibling(javadocFileName), jarFile.resolveSibling(clientJavadocFileName), - StandardCopyOption.REPLACE_EXISTING) - } - project.assemble.dependsOn(clientJar) - } static final Pattern GIT_PATTERN = Pattern.compile(/git@([^:]+):([^\.]+)\.git/) @@ -207,39 +196,11 @@ public class PluginBuildPlugin extends BuildPlugin { /** Adds nebula publishing task to generate a pom file for the plugin. 
*/ protected static void addClientJarPomGeneration(Project project) { - project.plugins.apply(MavenPublishPlugin.class) - - project.publishing { - publications { - clientJar(MavenPublication) { - from project.components.java - artifactId = project.pluginProperties.extension.name + '-client' - pom.withXml { XmlProvider xml -> - Node root = xml.asNode() - root.appendNode('name', project.pluginProperties.extension.name) - root.appendNode('description', project.pluginProperties.extension.description) - root.appendNode('url', urlFromOrigin(project.scminfo.origin)) - Node scmNode = root.appendNode('scm') - scmNode.appendNode('url', project.scminfo.origin) - } - } - } - } + project.plugins.apply(MavenScmPlugin.class) + project.description = project.pluginProperties.extension.description } /** Configure the pom for the main jar of this plugin */ - protected static void configureJarPom(Project project) { - project.plugins.apply(ScmInfoPlugin.class) - project.plugins.apply(MavenPublishPlugin.class) - - project.publishing { - publications { - nebula(MavenPublication) { - artifactId project.pluginProperties.extension.name - } - } - } - } protected void addNoticeGeneration(Project project) { File licenseFile = project.pluginProperties.extension.licenseFile diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index 3709805680d7a..ace32dff07bc8 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -20,6 +20,7 @@ package org.elasticsearch.gradle.precommit import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis import de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin +import org.gradle.api.JavaVersion import org.gradle.api.Project import org.gradle.api.Task import org.gradle.api.plugins.JavaBasePlugin @@ -93,6 +94,11 @@ class PrecommitTasks { signaturesURLs = project.forbiddenApis.signaturesURLs + [ getClass().getResource('/forbidden/es-server-signatures.txt') ] } + // forbidden apis doesn't support Java 11, so stop at 10 + String targetMajorVersion = (project.compilerJavaVersion.compareTo(JavaVersion.VERSION_1_10) > 0 ? 
+ JavaVersion.VERSION_1_10 : + project.compilerJavaVersion).getMajorVersion() + targetCompatibility = Integer.parseInt(targetMajorVersion) >= 9 ? targetMajorVersion : "1.${targetMajorVersion}" } Task forbiddenApis = project.tasks.findByName('forbiddenApis') forbiddenApis.group = "" // clear group, so this does not show up under verification tasks diff --git a/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt b/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt index 33e136a66f44f..cc179e12e3163 100644 --- a/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt +++ b/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt @@ -20,5 +20,14 @@ org.apache.http.entity.ContentType#create(java.lang.String,java.lang.String) org.apache.http.entity.ContentType#create(java.lang.String,java.nio.charset.Charset) org.apache.http.entity.ContentType#create(java.lang.String,org.apache.http.NameValuePair[]) +@defaultMessage ES's logging infrastructure uses log4j2 which we don't want to force on high level rest client users +org.elasticsearch.common.logging.DeprecationLogger +org.elasticsearch.common.logging.ESLoggerFactory +org.elasticsearch.common.logging.LogConfigurator +org.elasticsearch.common.logging.LoggerMessageFormat +org.elasticsearch.common.logging.Loggers +org.elasticsearch.common.logging.NodeNamePatternConverter +org.elasticsearch.common.logging.PrefixLogger + @defaultMessage We can't rely on log4j2 being on the classpath so don't log deprecations! org.elasticsearch.common.xcontent.LoggingDeprecationHandler diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java index b0b23ed9fdd46..76af65cde2352 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.client; +import org.apache.http.util.EntityUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; @@ -34,6 +35,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.client.Request; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.rest.RestStatus; @@ -178,6 +180,8 @@ public void testClusterHealthYellowClusterLevel() throws IOException { request.level(ClusterHealthRequest.Level.CLUSTER); ClusterHealthResponse response = execute(request, highLevelClient().cluster()::health, highLevelClient().cluster()::healthAsync); + logger.info("Shard stats\n{}", EntityUtils.toString( + client().performRequest(new Request("GET", "/_cat/shards")).getEntity())); assertYellowShards(response); assertThat(response.getIndices().size(), equalTo(0)); } @@ -190,6 +194,8 @@ public void testClusterHealthYellowIndicesLevel() throws IOException { request.level(ClusterHealthRequest.Level.INDICES); ClusterHealthResponse response = execute(request, highLevelClient().cluster()::health, highLevelClient().cluster()::healthAsync); + logger.info("Shard stats\n{}", EntityUtils.toString( + client().performRequest(new Request("GET", 
"/_cat/shards")).getEntity())); assertYellowShards(response); assertThat(response.getIndices().size(), equalTo(2)); for (Map.Entry entry : response.getIndices().entrySet()) { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index 0037460150f1a..95a29e99e5266 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.client; import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; +import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.protocol.xpack.ml.DeleteJobRequest; import org.elasticsearch.protocol.xpack.ml.DeleteJobResponse; @@ -36,6 +37,7 @@ import static org.hamcrest.Matchers.is; +@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32993") public class MachineLearningIT extends ESRestHighLevelClientTestCase { public void testPutJob() throws Exception { diff --git a/docs/build.gradle b/docs/build.gradle index 804695ae41a36..c691253917f49 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -41,6 +41,9 @@ integTestCluster { // TODO: remove this for 7.0, this exists to allow the doc examples in 6.x to continue using the defaults systemProperty 'es.scripting.use_java_time', 'false' systemProperty 'es.scripting.update.ctx_in_params', 'false' + + // TODO: remove this deprecation compatibility setting for 7.0 + systemProperty 'es.aggregations.enable_scripted_metric_agg_param', 'false' } // remove when https://github.com/elastic/elasticsearch/issues/31305 is fixed @@ -393,25 +396,25 @@ buildRestTests.setups['stored_scripted_metric_script'] = ''' - do: put_script: id: "my_init_script" - body: { "script": { "lang": "painless", "source": "params._agg.transactions = []" } } + body: { "script": { "lang": "painless", "source": "state.transactions = []" } } - match: { acknowledged: true } - do: put_script: id: "my_map_script" - body: { "script": { "lang": "painless", "source": "params._agg.transactions.add(doc.type.value == 'sale' ? doc.amount.value : -1 * doc.amount.value)" } } + body: { "script": { "lang": "painless", "source": "state.transactions.add(doc.type.value == 'sale' ? 
doc.amount.value : -1 * doc.amount.value)" } } - match: { acknowledged: true } - do: put_script: id: "my_combine_script" - body: { "script": { "lang": "painless", "source": "double profit = 0;for (t in params._agg.transactions) { profit += t; } return profit" } } + body: { "script": { "lang": "painless", "source": "double profit = 0;for (t in state.transactions) { profit += t; } return profit" } } - match: { acknowledged: true } - do: put_script: id: "my_reduce_script" - body: { "script": { "lang": "painless", "source": "double profit = 0;for (a in params._aggs) { profit += a; } return profit" } } + body: { "script": { "lang": "painless", "source": "double profit = 0;for (a in states) { profit += a; } return profit" } } - match: { acknowledged: true } ''' diff --git a/docs/reference/cluster.asciidoc b/docs/reference/cluster.asciidoc index f093b6ebcfae0..f92e364bae102 100644 --- a/docs/reference/cluster.asciidoc +++ b/docs/reference/cluster.asciidoc @@ -6,23 +6,70 @@ ["float",id="cluster-nodes"] == Node specification -Most cluster level APIs allow to specify which nodes to execute on (for -example, getting the node stats for a node). Nodes can be identified in -the APIs either using their internal node id, the node name, address, -custom attributes, or just the `_local` node receiving the request. For -example, here are some sample executions of nodes info: +Some cluster-level APIs may operate on a subset of the nodes which can be +specified with _node filters_. For example, the <>, +<>, and <> APIs +can all report results from a filtered set of nodes rather than from all nodes. + +_Node filters_ are written as a comma-separated list of individual filters, +each of which adds or removes nodes from the chosen subset. Each filter can be +one of the following: + +* `_all`, to add all nodes to the subset. +* `_local`, to add the local node to the subset. +* `_master`, to add the currently-elected master node to the subset. +* a node id or name, to add this node to the subset. +* an IP address or hostname, to add all matching nodes to the subset. +* a pattern, using `*` wildcards, which adds all nodes to the subset + whose name, address or hostname matches the pattern. +* `master:true`, `data:true`, `ingest:true` or `coordinating_only:true`, which + respectively add to the subset all master-eligible nodes, all data nodes, + all ingest nodes, and all coordinating-only nodes. +* `master:false`, `data:false`, `ingest:false` or `coordinating_only:false`, + which respectively remove from the subset all master-eligible nodes, all data + nodes, all ingest nodes, and all coordinating-only nodes. +* a pair of patterns, using `*` wildcards, of the form `attrname:attrvalue`, + which adds to the subset all nodes with a custom node attribute whose name + and value match the respective patterns. Custom node attributes are + configured by setting properties in the configuration file of the form + `node.attr.attrname: attrvalue`. + +NOTE: node filters run in the order in which they are given, which is important +if using filters that remove nodes from the set. For example +`_all,master:false` means all the nodes except the master-eligible ones, but +`master:false,_all` means the same as `_all` because the `_all` filter runs +after the `master:false` filter. + +NOTE: if no filters are given, the default is to select all nodes. However, if +any filters are given then they run starting with an empty chosen subset. 
This +means that filters such as `master:false` which remove nodes from the chosen +subset are only useful if they come after some other filters. When used on its +own, `master:false` selects no nodes. + +Here are some examples of the use of node filters with the +<> APIs. [source,js] -------------------------------------------------- -# Local +# If no filters are given, the default is to select all nodes +GET /_nodes +# Explicitly select all nodes +GET /_nodes/_all +# Select just the local node GET /_nodes/_local -# Address -GET /_nodes/10.0.0.3,10.0.0.4 -GET /_nodes/10.0.0.* -# Names +# Select the elected master node +GET /_nodes/_master +# Select nodes by name, which can include wildcards GET /_nodes/node_name_goes_here GET /_nodes/node_name_goes_* -# Attributes (set something like node.attr.rack: 2 in the config) +# Select nodes by address, which can include wildcards +GET /_nodes/10.0.0.3,10.0.0.4 +GET /_nodes/10.0.0.* +# Select nodes by role +GET /_nodes/_all,master:false +GET /_nodes/data:true,ingest:true +GET /_nodes/coordinating_only:true +# Select nodes by custom attribute (e.g. with something like `node.attr.rack: 2` in the configuration file) GET /_nodes/rack:2 GET /_nodes/ra*:2 GET /_nodes/ra*:2* diff --git a/docs/reference/cluster/nodes-hot-threads.asciidoc b/docs/reference/cluster/nodes-hot-threads.asciidoc index c8fa2c9bf7c40..541ee51a58adb 100644 --- a/docs/reference/cluster/nodes-hot-threads.asciidoc +++ b/docs/reference/cluster/nodes-hot-threads.asciidoc @@ -1,12 +1,23 @@ [[cluster-nodes-hot-threads]] == Nodes hot_threads -An API allowing to get the current hot threads on each node in the -cluster. Endpoints are `/_nodes/hot_threads`, and -`/_nodes/{nodesIds}/hot_threads`. +This API yields a breakdown of the hot threads on each selected node in the +cluster. Its endpoints are `/_nodes/hot_threads` and +`/_nodes/{nodes}/hot_threads`: -The output is plain text with a breakdown of each node's top hot -threads. Parameters allowed are: +[source,js] +-------------------------------------------------- +GET /_nodes/hot_threads +GET /_nodes/nodeId1,nodeId2/hot_threads +-------------------------------------------------- +// CONSOLE + +The first command gets the hot threads of all the nodes in the cluster. The +second command gets the hot threads of only `nodeId1` and `nodeId2`. Nodes can +be selected using <>. + +The output is plain text with a breakdown of each node's top hot threads. The +allowed parameters are: [horizontal] `threads`:: number of hot threads to provide, defaults to 3. diff --git a/docs/reference/cluster/stats.asciidoc b/docs/reference/cluster/stats.asciidoc index 3de850418719d..78bccc8bd695d 100644 --- a/docs/reference/cluster/stats.asciidoc +++ b/docs/reference/cluster/stats.asciidoc @@ -213,3 +213,12 @@ Will return, for example: // 3. All of the numbers and strings on the right hand side of *every* field in // the response are ignored. So we're really only asserting things about the // shape of this response, not the values in it. 
+ +This API can be restricted to a subset of the nodes using the `?nodeId` +parameter, which accepts <>: + +[source,js] +-------------------------------------------------- +GET /_cluster/stats?nodeId=node1,node*,master:false +-------------------------------------------------- +// CONSOLE diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index 9b722515304bf..682df421f606b 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -23,7 +23,7 @@ There are a few concepts that are core to Elasticsearch. Understanding these con [float] === Near Realtime (NRT) -Elasticsearch is a near real time search platform. What this means is there is a slight latency (normally one second) from the time you index a document until the time it becomes searchable. +Elasticsearch is a near-realtime search platform. What this means is there is a slight latency (normally one second) from the time you index a document until the time it becomes searchable. [float] === Cluster @@ -59,7 +59,7 @@ In a single cluster, you can define as many indexes as you want. deprecated[6.0.0,See <>] -A type used to be a logical category/partition of your index to allow you to store different types of documents in the same index, eg one type for users, another type for blog posts. It is no longer possible to create multiple types in an index, and the whole concept of types will be removed in a later version. See <> for more. +A type used to be a logical category/partition of your index to allow you to store different types of documents in the same index, e.g. one type for users, another type for blog posts. It is no longer possible to create multiple types in an index, and the whole concept of types will be removed in a later version. See <> for more. [float] === Document @@ -1065,7 +1065,7 @@ In the previous section, we skipped over a little detail called the document sco But queries do not always need to produce scores, in particular when they are only used for "filtering" the document set. Elasticsearch detects these situations and automatically optimizes query execution in order not to compute useless scores. -The {ref}/query-dsl-bool-query.html[`bool` query] that we introduced in the previous section also supports `filter` clauses which allow to use a query to restrict the documents that will be matched by other clauses, without changing how scores are computed. As an example, let's introduce the {ref}/query-dsl-range-query.html[`range` query], which allows us to filter documents by a range of values. This is generally used for numeric or date filtering. +The {ref}/query-dsl-bool-query.html[`bool` query] that we introduced in the previous section also supports `filter` clauses which allow us to use a query to restrict the documents that will be matched by other clauses, without changing how scores are computed. As an example, let's introduce the {ref}/query-dsl-range-query.html[`range` query], which allows us to filter documents by a range of values. This is generally used for numeric or date filtering. This example uses a bool query to return all accounts with balances between 20000 and 30000, inclusive. In other words, we want to find accounts with a balance that is greater than or equal to 20000 and less than or equal to 30000. 
diff --git a/docs/reference/mapping/dynamic/templates.asciidoc b/docs/reference/mapping/dynamic/templates.asciidoc index bdb009167552a..4fbed66449800 100644 --- a/docs/reference/mapping/dynamic/templates.asciidoc +++ b/docs/reference/mapping/dynamic/templates.asciidoc @@ -38,10 +38,10 @@ Dynamic templates are specified as an array of named objects: <3> The mapping that the matched field should use. -Templates are processed in order -- the first matching template wins. New -templates can be appended to the end of the list with the -<> API. If a new template has the same -name as an existing template, it will replace the old version. +Templates are processed in order -- the first matching template wins. When +putting new dynamic templates through the <> API, +all existing templates are overwritten. This allows for dynamic templates to be +reordered or deleted after they were initially added. [[match-mapping-type]] ==== `match_mapping_type` diff --git a/docs/reference/migration/migrate_6_0/search.asciidoc b/docs/reference/migration/migrate_6_0/search.asciidoc index 3416c940e225e..c16e981a92d20 100644 --- a/docs/reference/migration/migrate_6_0/search.asciidoc +++ b/docs/reference/migration/migrate_6_0/search.asciidoc @@ -17,7 +17,7 @@ * The `mlt` query (a synonym for the `more_like_this` query) has been removed. * The deprecated `like_text`, `ids` and `docs` parameters (all synonyms for `like`) of the `more_like_this` query have -been removed. Also the deprecated `min_word_len` (a synonym for `min_word_length`) and `max_word_len` +been removed. Also the deprecated `min_word_len` (a synonym for `min_word_length`) and `max_word_len` (a synonym for `max_word_length`) have been removed. * The `fuzzy_match` and `match_fuzzy` queries (synonyms for the `match` query) have been removed @@ -215,6 +215,11 @@ The ability to query and index context enabled suggestions without contexts has been deprecated. Context enabled suggestion queries without contexts have to visit every suggestion, which degrades the search performance considerably. +For the geo context, the value of the `path` parameter is now validated against the mapping, +and if `path` points to a non-`geo_point` field or the field doesn't exist, a deprecation +warning will be issued. In 7.0 it will be required for the `path` to point to a correct +`geo_point` field. + ==== Limiting the max number of expansion of span_multi queries `span_multi` queries will hit too many clauses failure if the number of terms that match the @@ -223,4 +228,3 @@ can set the <> of the multi term q rewrite. Or, if you use `span_multi` on `prefix` query only, you can activate the <> field option of the `text` field instead. This will rewrite any prefix query on the field to a single term query that matches the indexed prefix. 
- diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/InitializerTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/InitializerTests.java index 5d881632deeee..f43663da06a42 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/InitializerTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/InitializerTests.java @@ -53,7 +53,7 @@ public void testArrayInitializers() { "Object[] x = new Object[] {y, z, 1 + s, s + 'aaa'}; return x;"); assertEquals(4, objects.length); - assertEquals(new Integer(2), objects[0]); + assertEquals(Integer.valueOf(2), objects[0]); assertEquals(new ArrayList(), objects[1]); assertEquals("1aaa", objects[2]); assertEquals("aaaaaa", objects[3]); @@ -85,7 +85,7 @@ public void testListInitializers() { list = (List)exec("int y = 2; List z = new ArrayList(); String s = 'aaa'; List x = [y, z, 1 + s, s + 'aaa']; return x;"); assertEquals(4, list.size()); - assertEquals(new Integer(2), list.get(0)); + assertEquals(Integer.valueOf(2), list.get(0)); assertEquals(new ArrayList(), list.get(1)); assertEquals("1aaa", list.get(2)); assertEquals("aaaaaa", list.get(3)); @@ -100,15 +100,15 @@ public void testMapInitializers() { map = (Map)exec("[5 : 7, -1 : 14]"); assertEquals(2, map.size()); - assertEquals(new Integer(7), map.get(5)); - assertEquals(new Integer(14), map.get(-1)); + assertEquals(Integer.valueOf(7), map.get(5)); + assertEquals(Integer.valueOf(14), map.get(-1)); map = (Map)exec("int y = 2; int z = 3; Map x = [y*z : y + z, y - z : y, z : z]; return x;"); assertEquals(3, map.size()); - assertEquals(new Integer(5), map.get(6)); - assertEquals(new Integer(2), map.get(-1)); - assertEquals(new Integer(3), map.get(3)); + assertEquals(Integer.valueOf(5), map.get(6)); + assertEquals(Integer.valueOf(2), map.get(-1)); + assertEquals(Integer.valueOf(3), map.get(3)); map = (Map)exec("int y = 2; List z = new ArrayList(); String s = 'aaa';" + "def x = [y : z, 1 + s : s + 'aaa']; return x;"); @@ -139,7 +139,7 @@ public void testCrazyInitializer() { list3.add(9); assertEquals(3, map.size()); - assertEquals(new Integer(5), map.get(6)); + assertEquals(Integer.valueOf(5), map.get(6)); assertEquals(list2, map.get("s")); assertEquals(list3, map.get(3)); } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index ef5a73c8c8a15..490167d9c591f 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -57,7 +57,6 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TcpTransport; -import org.elasticsearch.transport.TransportRequestOptions; import java.io.IOException; import java.net.InetSocketAddress; @@ -150,7 +149,6 @@ private Bootstrap createBootstrap() { bootstrap.handler(getClientChannelInitializer()); - bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, Math.toIntExact(defaultConnectionProfile.getConnectTimeout().millis())); bootstrap.option(ChannelOption.TCP_NODELAY, TCP_NO_DELAY.get(settings)); bootstrap.option(ChannelOption.SO_KEEPALIVE, TCP_KEEP_ALIVE.get(settings)); @@ -178,14 +176,8 @@ private void createServerBootstrap(ProfileSettings profileSettings) { String name = 
profileSettings.profileName; if (logger.isDebugEnabled()) { logger.debug("using profile[{}], worker_count[{}], port[{}], bind_host[{}], publish_host[{}], compress[{}], " - + "connect_timeout[{}], connections_per_node[{}/{}/{}/{}/{}], receive_predictor[{}->{}]", + + "receive_predictor[{}->{}]", name, workerCount, profileSettings.portOrRange, profileSettings.bindHosts, profileSettings.publishHosts, compress, - defaultConnectionProfile.getConnectTimeout(), - defaultConnectionProfile.getNumConnectionsPerType(TransportRequestOptions.Type.RECOVERY), - defaultConnectionProfile.getNumConnectionsPerType(TransportRequestOptions.Type.BULK), - defaultConnectionProfile.getNumConnectionsPerType(TransportRequestOptions.Type.REG), - defaultConnectionProfile.getNumConnectionsPerType(TransportRequestOptions.Type.STATE), - defaultConnectionProfile.getNumConnectionsPerType(TransportRequestOptions.Type.PING), receivePredictorMin, receivePredictorMax); } @@ -276,8 +268,12 @@ protected NettyTcpChannel bind(String name, InetSocketAddress address) { return esChannel; } - ScheduledPing getPing() { - return scheduledPing; + long successfulPingCount() { + return successfulPings.count(); + } + + long failedPingCount() { + return failedPings.count(); } @Override diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java index 4aed64459dba5..3b34e323e47b5 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java @@ -31,10 +31,8 @@ import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TcpTransport; -import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportResponseHandler; @@ -48,6 +46,7 @@ import static org.hamcrest.Matchers.greaterThan; public class Netty4ScheduledPingTests extends ESTestCase { + public void testScheduledPing() throws Exception { ThreadPool threadPool = new TestThreadPool(getClass().getName()); @@ -63,14 +62,14 @@ public void testScheduledPing() throws Exception { final Netty4Transport nettyA = new Netty4Transport(settings, threadPool, new NetworkService(Collections.emptyList()), BigArrays.NON_RECYCLING_INSTANCE, registry, circuitBreakerService); MockTransportService serviceA = new MockTransportService(settings, nettyA, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - null); + null); serviceA.start(); serviceA.acceptIncomingRequests(); final Netty4Transport nettyB = new Netty4Transport(settings, threadPool, new NetworkService(Collections.emptyList()), BigArrays.NON_RECYCLING_INSTANCE, registry, circuitBreakerService); MockTransportService serviceB = new MockTransportService(settings, nettyB, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - null); + null); serviceB.start(); serviceB.acceptIncomingRequests(); @@ -82,22 +81,19 @@ public void testScheduledPing() throws Exception { serviceB.connectToNode(nodeA); assertBusy(() -> { - 
assertThat(nettyA.getPing().getSuccessfulPings(), greaterThan(100L)); - assertThat(nettyB.getPing().getSuccessfulPings(), greaterThan(100L)); + assertThat(nettyA.successfulPingCount(), greaterThan(100L)); + assertThat(nettyB.successfulPingCount(), greaterThan(100L)); }); - assertThat(nettyA.getPing().getFailedPings(), equalTo(0L)); - assertThat(nettyB.getPing().getFailedPings(), equalTo(0L)); + assertThat(nettyA.failedPingCount(), equalTo(0L)); + assertThat(nettyB.failedPingCount(), equalTo(0L)); serviceA.registerRequestHandler("internal:sayHello", TransportRequest.Empty::new, ThreadPool.Names.GENERIC, - new TransportRequestHandler() { - @Override - public void messageReceived(TransportRequest.Empty request, TransportChannel channel) { - try { - channel.sendResponse(TransportResponse.Empty.INSTANCE, TransportResponseOptions.EMPTY); - } catch (IOException e) { - logger.error("Unexpected failure", e); - fail(e.getMessage()); - } + (request, channel) -> { + try { + channel.sendResponse(TransportResponse.Empty.INSTANCE, TransportResponseOptions.EMPTY); + } catch (IOException e) { + logger.error("Unexpected failure", e); + fail(e.getMessage()); } }); @@ -129,11 +125,11 @@ public void handleException(TransportException exp) { } assertBusy(() -> { - assertThat(nettyA.getPing().getSuccessfulPings(), greaterThan(200L)); - assertThat(nettyB.getPing().getSuccessfulPings(), greaterThan(200L)); + assertThat(nettyA.successfulPingCount(), greaterThan(200L)); + assertThat(nettyB.successfulPingCount(), greaterThan(200L)); }); - assertThat(nettyA.getPing().getFailedPings(), equalTo(0L)); - assertThat(nettyB.getPing().getFailedPings(), equalTo(0L)); + assertThat(nettyA.failedPingCount(), equalTo(0L)); + assertThat(nettyB.failedPingCount(), equalTo(0L)); Releasables.close(serviceA, serviceB); terminate(threadPool); diff --git a/server/build.gradle b/server/build.gradle index 24018a40ae217..8db3ee108c08b 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -351,3 +351,15 @@ if (isEclipse == false || project.path == ":server-tests") { check.dependsOn integTest integTest.mustRunAfter test } +// TODO: remove these compatibility tests in 7.0 +additionalTest('testScriptedMetricAggParamsV6Compatibility') { + include '**/ScriptedMetricAggregatorAggStateV6CompatTests.class' + include '**/InternalScriptedMetricAggStateV6CompatTests.class' + systemProperty 'es.aggregations.enable_scripted_metric_agg_param', 'true' +} + +test { + // these are tested explicitly in separate test tasks + exclude '**/ScriptedMetricAggregatorAggStateV6CompatTests.class' + exclude '**/InternalScriptedMetricAggStateV6CompatTests.class' +} diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index bf1974ac54077..d7575e60f5656 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -131,37 +131,6 @@ protected final void doExecute(final BulkRequest bulkRequest, final ActionListen @Override protected void doExecute(Task task, BulkRequest bulkRequest, ActionListener listener) { - boolean hasIndexRequestsWithPipelines = false; - ImmutableOpenMap indicesMetaData = clusterService.state().getMetaData().indices(); - for (DocWriteRequest actionRequest : bulkRequest.requests) { - if (actionRequest instanceof IndexRequest) { - IndexRequest indexRequest = (IndexRequest) actionRequest; - String pipeline = 
indexRequest.getPipeline(); - if (pipeline == null) { - IndexMetaData indexMetaData = indicesMetaData.get(indexRequest.index()); - if (indexMetaData == null) { - indexRequest.setPipeline(IngestService.NOOP_PIPELINE_NAME); - } else { - String defaultPipeline = IndexSettings.DEFAULT_PIPELINE.get(indexMetaData.getSettings()); - indexRequest.setPipeline(defaultPipeline); - if (IngestService.NOOP_PIPELINE_NAME.equals(defaultPipeline) == false) { - hasIndexRequestsWithPipelines = true; - } - } - } else if (IngestService.NOOP_PIPELINE_NAME.equals(pipeline) == false) { - hasIndexRequestsWithPipelines = true; - } - } - } - if (hasIndexRequestsWithPipelines) { - if (clusterService.localNode().isIngestNode()) { - processBulkIndexIngestRequest(task, bulkRequest, listener); - } else { - ingestForwarder.forwardIngestRequest(BulkAction.INSTANCE, bulkRequest, listener); - } - return; - } - final long startTime = relativeTime(); final AtomicArray responses = new AtomicArray<>(bulkRequest.requests.size()); @@ -195,7 +164,7 @@ protected void doExecute(Task task, BulkRequest bulkRequest, ActionListener { + executeIngestAndBulk(task, bulkRequest, startTime, ActionListener.wrap(listener::onResponse, inner -> { inner.addSuppressed(e); listener.onFailure(inner); }), responses, indicesThatCannotBeCreated); @@ -229,7 +198,47 @@ public void onFailure(Exception e) { } } } else { - executeBulk(task, bulkRequest, startTime, listener, responses, emptyMap()); + executeIngestAndBulk(task, bulkRequest, startTime, listener, responses, emptyMap()); + } + } + + private void executeIngestAndBulk(Task task, final BulkRequest bulkRequest, final long startTimeNanos, + final ActionListener listener, final AtomicArray responses, + Map indicesThatCannotBeCreated) { + boolean hasIndexRequestsWithPipelines = false; + ImmutableOpenMap indicesMetaData = clusterService.state().getMetaData().indices(); + for (DocWriteRequest actionRequest : bulkRequest.requests) { + if (actionRequest instanceof IndexRequest) { + IndexRequest indexRequest = (IndexRequest) actionRequest; + String pipeline = indexRequest.getPipeline(); + if (pipeline == null) { + IndexMetaData indexMetaData = indicesMetaData.get(indexRequest.index()); + if (indexMetaData == null) { + indexRequest.setPipeline(IngestService.NOOP_PIPELINE_NAME); + } else { + String defaultPipeline = IndexSettings.DEFAULT_PIPELINE.get(indexMetaData.getSettings()); + indexRequest.setPipeline(defaultPipeline); + if (IngestService.NOOP_PIPELINE_NAME.equals(defaultPipeline) == false) { + hasIndexRequestsWithPipelines = true; + } + } + } else if (IngestService.NOOP_PIPELINE_NAME.equals(pipeline) == false) { + hasIndexRequestsWithPipelines = true; + } + } + } + if (hasIndexRequestsWithPipelines) { + try { + if (clusterService.localNode().isIngestNode()) { + processBulkIndexIngestRequest(task, bulkRequest, listener); + } else { + ingestForwarder.forwardIngestRequest(BulkAction.INSTANCE, bulkRequest, listener); + } + } catch (Exception e) { + listener.onFailure(e); + } + } else { + executeBulk(task, bulkRequest, startTimeNanos, listener, responses, indicesThatCannotBeCreated); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java b/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java index 998cd5ba0a870..2a9d960f8ccea 100644 --- a/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java @@ -101,7 +101,7 @@ public void onFailure(Exception e) { } 
@Override - protected void doRun() throws Exception { + protected void doRun() { try (Releasable ignored = nodeLocks.acquire(node)) { validateAndConnectIfNeeded(node); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateFilter.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateFilter.java deleted file mode 100644 index 6ed9664c92b1c..0000000000000 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateFilter.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.cluster.metadata; - -import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest; - -/** - * Enables filtering the index templates that will be applied for an index, per create index request. - */ -public interface IndexTemplateFilter { - - /** - * @return {@code true} if the given template should be applied on the newly created index, - * {@code false} otherwise. - */ - boolean apply(CreateIndexClusterStateUpdateRequest request, IndexTemplateMetaData template); -} diff --git a/server/src/main/java/org/elasticsearch/common/inject/matcher/Matchers.java b/server/src/main/java/org/elasticsearch/common/inject/matcher/Matchers.java index cc354145b11bb..dd5d98dfabbd0 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/matcher/Matchers.java +++ b/server/src/main/java/org/elasticsearch/common/inject/matcher/Matchers.java @@ -16,6 +16,8 @@ package org.elasticsearch.common.inject.matcher; +import org.elasticsearch.common.SuppressForbidden; + import java.lang.annotation.Annotation; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; @@ -327,7 +329,9 @@ public String toString() { return "inPackage(" + targetPackage.getName() + ")"; } + @SuppressForbidden(reason = "ClassLoader.getDefinedPackage not available yet") public Object readResolve() { + // TODO minJava >= 9 : use ClassLoader.getDefinedPackage and remove @SuppressForbidden return inPackage(Package.getPackage(packageName)); } } diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 0e67fd4855b3c..c099d75a19556 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -299,13 +299,13 @@ public void apply(Settings value, Settings current, Settings previous) { TcpTransport.TCP_REUSE_ADDRESS_PROFILE, TcpTransport.TCP_SEND_BUFFER_SIZE_PROFILE, TcpTransport.TCP_RECEIVE_BUFFER_SIZE_PROFILE, - TcpTransport.CONNECTIONS_PER_NODE_RECOVERY, - TcpTransport.CONNECTIONS_PER_NODE_BULK, - TcpTransport.CONNECTIONS_PER_NODE_REG, 
- TcpTransport.CONNECTIONS_PER_NODE_STATE, - TcpTransport.CONNECTIONS_PER_NODE_PING, + TransportService.CONNECTIONS_PER_NODE_RECOVERY, + TransportService.CONNECTIONS_PER_NODE_BULK, + TransportService.CONNECTIONS_PER_NODE_REG, + TransportService.CONNECTIONS_PER_NODE_STATE, + TransportService.CONNECTIONS_PER_NODE_PING, + TransportService.TCP_CONNECT_TIMEOUT, TcpTransport.PING_SCHEDULE, - TcpTransport.TCP_CONNECT_TIMEOUT, NetworkService.NETWORK_SERVER, TcpTransport.TCP_NO_DELAY, TcpTransport.TCP_KEEP_ALIVE, diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index b7200d2a2da4d..fc85839f3f5cb 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -541,6 +541,7 @@ public Translog.Location getTranslogLastWriteLocation() { private void revisitIndexDeletionPolicyOnTranslogSynced() throws IOException { if (combinedDeletionPolicy.hasUnreferencedCommits()) { indexWriter.deleteUnusedFiles(); + translog.trimUnreferencedReaders(); } } @@ -1949,6 +1950,8 @@ private void releaseIndexCommit(IndexCommit snapshot) throws IOException { // Revisit the deletion policy if we can clean up the snapshotting commit. if (combinedDeletionPolicy.releaseCommit(snapshot)) { ensureOpen(); + // Here we don't have to trim the translog because snapshotting an index commit + // does not lock the translog or prevent unreferenced files from being trimmed. indexWriter.deleteUnusedFiles(); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index e0b754e7fa307..b0a0493ab6f63 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -53,6 +53,7 @@ import org.elasticsearch.indices.InvalidTypeNameException; import org.elasticsearch.indices.TypeMissingException; import org.elasticsearch.indices.mapper.MapperRegistry; +import org.elasticsearch.search.suggest.completion.context.ContextMapping; import java.io.Closeable; import java.io.IOException; @@ -454,6 +455,8 @@ private synchronized Map internalMerge(@Nullable Documen MapperMergeValidator.validateFieldReferences(indexSettings, fieldMappers, fieldAliasMappers, fullPathObjectMappers, fieldTypes); + ContextMapping.validateContextPaths(indexSettings.getIndexVersionCreated(), fieldMappers, fieldTypes::get); + if (reason == MergeReason.MAPPING_UPDATE) { // this check will only be performed on the master node when there is // a call to the update mapping API. 
For all other cases like diff --git a/server/src/main/java/org/elasticsearch/script/ScriptedMetricAggContexts.java b/server/src/main/java/org/elasticsearch/script/ScriptedMetricAggContexts.java index 774dc95d39977..0c34c59b7be58 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptedMetricAggContexts.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptedMetricAggContexts.java @@ -22,6 +22,8 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Scorer; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.search.lookup.LeafSearchLookup; import org.elasticsearch.search.lookup.SearchLookup; @@ -31,6 +33,25 @@ import java.util.Map; public class ScriptedMetricAggContexts { + private static final DeprecationLogger DEPRECATION_LOGGER = + new DeprecationLogger(Loggers.getLogger(ScriptedMetricAggContexts.class)); + + // Public for access from tests + public static final String AGG_PARAM_DEPRECATION_WARNING = + "params._agg/_aggs for scripted metric aggregations are deprecated, use state/states (not in params) instead. " + + "Use -Des.aggregations.enable_scripted_metric_agg_param=false to disable."; + + public static boolean deprecatedAggParamEnabled() { + boolean enabled = Boolean.parseBoolean( + System.getProperty("es.aggregations.enable_scripted_metric_agg_param", "true")); + + if (enabled) { + DEPRECATION_LOGGER.deprecatedAndMaybeLog("enable_scripted_metric_agg_param", AGG_PARAM_DEPRECATION_WARNING); + } + + return enabled; + } + private abstract static class ParamsAndStateBase { private final Map params; private final Object state; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java index 75bcd8689cea6..a07e3302f20c0 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java @@ -170,7 +170,7 @@ public int getNumBuckets() { return new AutoDateHistogramAggregatorFactory(name, config, numBuckets, roundings, context, parent, subFactoriesBuilder, metaData); } - private static Rounding createRounding(DateTimeUnit interval, DateTimeZone timeZone) { + static Rounding createRounding(DateTimeUnit interval, DateTimeZone timeZone) { Rounding.Builder tzRoundingBuilder = Rounding.builder(interval); if (timeZone != null) { tzRoundingBuilder.timeZone(timeZone); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java index 6a78ca6724988..7bc2b9a317838 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java @@ -418,7 +418,7 @@ private BucketReduceResult addEmptyBuckets(BucketReduceResult currentResult, Red return currentResult; } int roundingIdx = getAppropriateRounding(list.get(0).key, list.get(list.size() - 1).key, currentResult.roundingIdx, - 
bucketInfo.roundingInfos); + bucketInfo.roundingInfos, targetBuckets); RoundingInfo roundingInfo = bucketInfo.roundingInfos[roundingIdx]; Rounding rounding = roundingInfo.rounding; // merge buckets using the new rounding @@ -447,8 +447,8 @@ private BucketReduceResult addEmptyBuckets(BucketReduceResult currentResult, Red return new BucketReduceResult(list, roundingInfo, roundingIdx); } - private int getAppropriateRounding(long minKey, long maxKey, int roundingIdx, - RoundingInfo[] roundings) { + static int getAppropriateRounding(long minKey, long maxKey, int roundingIdx, + RoundingInfo[] roundings, int targetBuckets) { if (roundingIdx == roundings.length - 1) { return roundingIdx; } @@ -480,7 +480,7 @@ private int getAppropriateRounding(long minKey, long maxKey, int roundingIdx, currentKey = currentRounding.nextRoundingValue(currentKey); } currentRoundingIdx++; - } while (requiredBuckets > (targetBuckets * roundings[roundingIdx].getMaximumInnerInterval()) + } while (requiredBuckets > (targetBuckets * roundings[currentRoundingIdx - 1].getMaximumInnerInterval()) && currentRoundingIdx < roundings.length); // The loop will increase past the correct rounding index here so we // need to subtract one to get the rounding index we need diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java index b671f95c446cb..4d911cb550b21 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java @@ -96,7 +96,9 @@ public InternalAggregation doReduce(List aggregations, Redu } // Add _aggs to params map for backwards compatibility (redundant with a context variable on the ReduceScript created below). - params.put("_aggs", aggregationObjects); + if (ScriptedMetricAggContexts.deprecatedAggParamEnabled()) { + params.put("_aggs", aggregationObjects); + } ScriptedMetricAggContexts.ReduceScript.Factory factory = reduceContext.scriptService().compile( firstAggregation.reduceScript, ScriptedMetricAggContexts.ReduceScript.CONTEXT); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java index 69e4c00cf7206..ca37b5618a2b9 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java @@ -83,10 +83,17 @@ public Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBu // Add _agg to params map for backwards compatibility (redundant with context variables on the scripts created below). // When this is removed, aggState (as passed to ScriptedMetricAggregator) can be changed to Map, since // it won't be possible to completely replace it with another type as is possible when it's an entry in params. 
- if (aggParams.containsKey("_agg") == false) { - aggParams.put("_agg", new HashMap()); + Object aggState = new HashMap(); + if (ScriptedMetricAggContexts.deprecatedAggParamEnabled()) { + if (aggParams.containsKey("_agg") == false) { + // Add _agg if it wasn't added manually + aggParams.put("_agg", aggState); + } else { + // If it was added manually, also use it for the agg context variable to reduce the likelihood of + // weird behavior due to multiple different variables. + aggState = aggParams.get("_agg"); + } } - Object aggState = aggParams.get("_agg"); final ScriptedMetricAggContexts.InitScript initScript = this.initScript.newInstance( mergeParams(aggParams, initScriptParams), aggState); diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java index 7eab4e072f146..af7300bc79fbe 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMapping.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.suggest.completion.context; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentFragment; @@ -28,6 +29,8 @@ import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.mapper.CompletionFieldMapper; +import org.elasticsearch.index.mapper.FieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.ParseContext; import java.io.IOException; @@ -35,6 +38,7 @@ import java.util.List; import java.util.Objects; import java.util.Set; +import java.util.function.Function; /** * A {@link ContextMapping} defines criteria that can be used to @@ -131,6 +135,31 @@ public final List parseQueryContext(XContentParser parser) */ protected abstract XContentBuilder toInnerXContent(XContentBuilder builder, Params params) throws IOException; + /** + * Checks if the current context is consistent with the rest of the fields. For example, the GeoContext + * should check that the field that it points to has the correct type. 
+ */ + protected void validateReferences(Version indexVersionCreated, Function fieldResolver) { + // No validation is required by default + } + + /** + * Verifies that all field paths specified in contexts point to the fields with correct mappings + */ + public static void validateContextPaths(Version indexVersionCreated, List fieldMappers, + Function fieldResolver) { + for (FieldMapper fieldMapper : fieldMappers) { + if (CompletionFieldMapper.CONTENT_TYPE.equals(fieldMapper.typeName())) { + CompletionFieldMapper.CompletionFieldType fieldType = ((CompletionFieldMapper) fieldMapper).fieldType(); + if (fieldType.hasContextMappings()) { + for (ContextMapping context : fieldType.getContextMappings()) { + context.validateReferences(indexVersionCreated, fieldResolver); + } + } + } + } + } + @Override public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field(FIELD_NAME, name); diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java index 4d6b53296f157..3f10dfab9262f 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/ContextMappings.java @@ -39,6 +39,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; @@ -52,7 +53,7 @@ * and creates context queries for defined {@link ContextMapping}s * for a {@link CompletionFieldMapper} */ -public class ContextMappings implements ToXContent { +public class ContextMappings implements ToXContent, Iterable { private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(ContextMappings.class)); @@ -102,6 +103,11 @@ public void addField(ParseContext.Document document, String name, String input, document.add(new TypedContextField(name, input, weight, contexts, document)); } + @Override + public Iterator iterator() { + return contextMappings.iterator(); + } + /** * Field prepends context values with a suggestion * Context values are associated with a type, denoted by diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java index 48aaf705099da..1a10e3238ec96 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java @@ -19,12 +19,17 @@ package org.elasticsearch.search.suggest.completion.context; +import org.apache.logging.log4j.LogManager; +import org.apache.lucene.document.LatLonDocValuesField; +import org.apache.lucene.document.LatLonPoint; import org.apache.lucene.document.StringField; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexableField; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.xcontent.XContentBuilder; import 
org.elasticsearch.common.xcontent.XContentParser; @@ -42,6 +47,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.function.Function; import java.util.stream.Collectors; import static org.elasticsearch.common.geo.GeoHashUtils.addNeighbors; @@ -69,6 +75,8 @@ public class GeoContextMapping extends ContextMapping { static final String CONTEXT_PRECISION = "precision"; static final String CONTEXT_NEIGHBOURS = "neighbours"; + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(GeoContextMapping.class)); + private final int precision; private final String fieldName; @@ -205,11 +213,11 @@ public Set parseContext(Document document) { for (IndexableField field : fields) { if (field instanceof StringField) { spare.resetFromString(field.stringValue()); - } else { - // todo return this to .stringValue() once LatLonPoint implements it + geohashes.add(spare.geohash()); + } else if (field instanceof LatLonPoint || field instanceof LatLonDocValuesField) { spare.resetFromIndexableField(field); + geohashes.add(spare.geohash()); } - geohashes.add(spare.geohash()); } } } @@ -279,6 +287,21 @@ public List toInternalQueryContexts(List return internalQueryContextList; } + @Override + protected void validateReferences(Version indexVersionCreated, Function fieldResolver) { + if (fieldName != null) { + MappedFieldType mappedFieldType = fieldResolver.apply(fieldName); + if (mappedFieldType == null) { + DEPRECATION_LOGGER.deprecatedAndMaybeLog("geo_context_mapping", + "field [{}] referenced in context [{}] is not defined in the mapping", fieldName, name); + } else if (GeoPointFieldMapper.CONTENT_TYPE.equals(mappedFieldType.typeName()) == false) { + DEPRECATION_LOGGER.deprecatedAndMaybeLog("geo_context_mapping", + "field [{}] referenced in context [{}] must be mapped to geo_point, found [{}]", + fieldName, name, mappedFieldType.typeName()); + } + } + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/server/src/main/java/org/elasticsearch/transport/CloseableConnection.java b/server/src/main/java/org/elasticsearch/transport/CloseableConnection.java new file mode 100644 index 0000000000000..cdc68dc10f42e --- /dev/null +++ b/server/src/main/java/org/elasticsearch/transport/CloseableConnection.java @@ -0,0 +1,49 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.concurrent.CompletableContext; + + +/** + * Abstract Transport.Connection that provides common close logic. 
+ */ +public abstract class CloseableConnection implements Transport.Connection { + + private final CompletableContext closeContext = new CompletableContext<>(); + + @Override + public void addCloseListener(ActionListener listener) { + closeContext.addListener(ActionListener.toBiConsumer(listener)); + } + + @Override + public boolean isClosed() { + return closeContext.isDone(); + } + + @Override + public void close() { + // This method is safe to call multiple times as the close context will provide concurrency + // protection and only be completed once. The attached listeners will only be notified once. + closeContext.complete(null); + } +} diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java b/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java new file mode 100644 index 0000000000000..1ff8b701a83e1 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java @@ -0,0 +1,312 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.transport; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.component.Lifecycle; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.AbstractLifecycleRunnable; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.common.util.concurrent.KeyedLock; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +/** + * This class manages node connections. The connection is opened by the underlying transport. Once the + * connection is opened, this class manages the connection. This includes keep-alive pings and closing + * the connection when the connection manager is closed. 
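CloseableConnection centralizes complete-once close handling for connections. The following self-contained sketch shows the same pattern with a plain CompletableFuture standing in for CompletableContext (an assumption: CompletableContext is treated here as having equivalent complete-once listener semantics).

    import java.util.concurrent.CompletableFuture;
    import java.util.function.Consumer;

    class DemoCloseable implements AutoCloseable {
        private final CompletableFuture<Void> closeFuture = new CompletableFuture<>();

        void addCloseListener(Consumer<Void> listener) {
            closeFuture.thenAccept(listener); // fires immediately if already closed
        }

        boolean isClosed() {
            return closeFuture.isDone();
        }

        @Override
        public void close() {
            // complete() is a no-op after the first call, so close() is idempotent and
            // each registered listener is notified exactly once.
            closeFuture.complete(null);
        }
    }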
+ */ +public class ConnectionManager implements Closeable { + + private final ConcurrentMap connectedNodes = ConcurrentCollections.newConcurrentMap(); + private final KeyedLock connectionLock = new KeyedLock<>(); + private final Logger logger; + private final Transport transport; + private final ThreadPool threadPool; + private final TimeValue pingSchedule; + private final ConnectionProfile defaultProfile; + private final Lifecycle lifecycle = new Lifecycle(); + private final ReadWriteLock closeLock = new ReentrantReadWriteLock(); + private final DelegatingNodeConnectionListener connectionListener = new DelegatingNodeConnectionListener(); + + public ConnectionManager(Settings settings, Transport transport, ThreadPool threadPool) { + this(settings, transport, threadPool, buildDefaultConnectionProfile(settings)); + } + + public ConnectionManager(Settings settings, Transport transport, ThreadPool threadPool, ConnectionProfile defaultProfile) { + this.logger = Loggers.getLogger(getClass(), settings); + this.transport = transport; + this.threadPool = threadPool; + this.pingSchedule = TcpTransport.PING_SCHEDULE.get(settings); + this.defaultProfile = defaultProfile; + this.lifecycle.moveToStarted(); + + if (pingSchedule.millis() > 0) { + threadPool.schedule(pingSchedule, ThreadPool.Names.GENERIC, new ScheduledPing()); + } + } + + public void addListener(TransportConnectionListener listener) { + this.connectionListener.listeners.add(listener); + } + + public void removeListener(TransportConnectionListener listener) { + this.connectionListener.listeners.remove(listener); + } + + public Transport.Connection openConnection(DiscoveryNode node, ConnectionProfile connectionProfile) { + return transport.openConnection(node, ConnectionProfile.resolveConnectionProfile(connectionProfile, defaultProfile)); + } + + /** + * Connects to a node with the given connection profile. If the node is already connected this method has no effect. + * Once a successful connection is established, it can be validated before being exposed.
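The constructor above only schedules keep-alive pings when transport.ping_schedule is positive; the setting (TcpTransport.PING_SCHEDULE, shown later in this diff) defaults to -1, i.e. disabled. A hypothetical way to enable it, assuming transport and threadPool instances already exist:

    Settings settings = Settings.builder()
        .put("transport.ping_schedule", "5s") // default of -1 leaves ScheduledPing unscheduled
        .build();
    ConnectionManager connectionManager = new ConnectionManager(settings, transport, threadPool);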
+ */ + public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfile, + CheckedBiConsumer connectionValidator) + throws ConnectTransportException { + ConnectionProfile resolvedProfile = ConnectionProfile.resolveConnectionProfile(connectionProfile, defaultProfile); + if (node == null) { + throw new ConnectTransportException(null, "can't connect to a null node"); + } + closeLock.readLock().lock(); // ensure we don't open connections while we are closing + try { + ensureOpen(); + try (Releasable ignored = connectionLock.acquire(node.getId())) { + Transport.Connection connection = connectedNodes.get(node); + if (connection != null) { + return; + } + boolean success = false; + try { + connection = transport.openConnection(node, resolvedProfile); + connectionValidator.accept(connection, resolvedProfile); + // we acquire a connection lock, so no way there is an existing connection + connectedNodes.put(node, connection); + if (logger.isDebugEnabled()) { + logger.debug("connected to node [{}]", node); + } + try { + connectionListener.onNodeConnected(node); + } finally { + final Transport.Connection finalConnection = connection; + connection.addCloseListener(ActionListener.wrap(() -> { + connectedNodes.remove(node, finalConnection); + connectionListener.onNodeDisconnected(node); + })); + } + if (connection.isClosed()) { + throw new NodeNotConnectedException(node, "connection concurrently closed"); + } + success = true; + } catch (ConnectTransportException e) { + throw e; + } catch (Exception e) { + throw new ConnectTransportException(node, "general node connection failure", e); + } finally { + if (success == false) { // close the connection if there is a failure + logger.trace(() -> new ParameterizedMessage("failed to connect to [{}], cleaning dangling connections", node)); + IOUtils.closeWhileHandlingException(connection); + } + } + } + } finally { + closeLock.readLock().unlock(); + } + } + + /** + * Returns a connection for the given node if the node is connected. + * Connections returned from this method must not be closed. The lifecycle of this connection is + * maintained by this connection manager. + * + * @throws NodeNotConnectedException if the node is not connected + * @see #connectToNode(DiscoveryNode, ConnectionProfile, CheckedBiConsumer) + */ + public Transport.Connection getConnection(DiscoveryNode node) { + Transport.Connection connection = connectedNodes.get(node); + if (connection == null) { + throw new NodeNotConnectedException(node, "Node not connected"); + } + return connection; + } + + /** + * Returns {@code true} if the node is connected. + */ + public boolean nodeConnected(DiscoveryNode node) { + return connectedNodes.containsKey(node); + } + + /** + * Disconnects from the given node; if not connected, this method does nothing.
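A hypothetical caller-side sketch of the validation hook above: the validator runs after the raw connection is opened but before it is published to the connected-nodes map, and any exception it throws closes the dangling connection in the finally block. The handshake call stands in for whatever check the caller actually performs.

    connectionManager.connectToNode(node, null, (connection, profile) -> {
        // a null profile resolves to the manager's default via resolveConnectionProfile
        transportService.handshake(connection, profile.getHandshakeTimeout().millis());
    });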
+ */ + public void disconnectFromNode(DiscoveryNode node) { + Transport.Connection nodeChannels = connectedNodes.remove(node); + if (nodeChannels != null) { + // if we found it and removed it we close + nodeChannels.close(); + } + } + + public int connectedNodeCount() { + return connectedNodes.size(); + } + + @Override + public void close() { + lifecycle.moveToStopped(); + CountDownLatch latch = new CountDownLatch(1); + + // TODO: Consider moving all read/write lock (in Transport and this class) to the TransportService + threadPool.generic().execute(() -> { + closeLock.writeLock().lock(); + try { + // we are holding a write lock so nobody modifies the connectedNodes / openConnections map - it's safe to first close + // all instances and then clear the maps + Iterator> iterator = connectedNodes.entrySet().iterator(); + while (iterator.hasNext()) { + Map.Entry next = iterator.next(); + try { + IOUtils.closeWhileHandlingException(next.getValue()); + } finally { + iterator.remove(); + } + } + } finally { + closeLock.writeLock().unlock(); + latch.countDown(); + } + }); + + try { + try { + latch.await(30, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + // ignore + } + } finally { + lifecycle.moveToClosed(); + } + } + + private void ensureOpen() { + if (lifecycle.started() == false) { + throw new IllegalStateException("connection manager is closed"); + } + } + + private class ScheduledPing extends AbstractLifecycleRunnable { + + private ScheduledPing() { + super(lifecycle, logger); + } + + @Override + protected void doRunInLifecycle() { + for (Map.Entry entry : connectedNodes.entrySet()) { + Transport.Connection connection = entry.getValue(); + if (connection.sendPing() == false) { + logger.warn("attempted to send ping to connection without support for pings [{}]", connection); + } + } + } + + @Override + protected void onAfterInLifecycle() { + try { + threadPool.schedule(pingSchedule, ThreadPool.Names.GENERIC, this); + } catch (EsRejectedExecutionException ex) { + if (ex.isExecutorShutdown()) { + logger.debug("couldn't schedule new ping execution, executor is shutting down", ex); + } else { + throw ex; + } + } + } + + @Override + public void onFailure(Exception e) { + if (lifecycle.stoppedOrClosed()) { + logger.trace("failed to send ping transport message", e); + } else { + logger.warn("failed to send ping transport message", e); + } + } + } + + private static final class DelegatingNodeConnectionListener implements TransportConnectionListener { + + private final List listeners = new CopyOnWriteArrayList<>(); + + @Override + public void onNodeDisconnected(DiscoveryNode key) { + for (TransportConnectionListener listener : listeners) { + listener.onNodeDisconnected(key); + } + } + + @Override + public void onNodeConnected(DiscoveryNode node) { + for (TransportConnectionListener listener : listeners) { + listener.onNodeConnected(node); + } + } + } + + static ConnectionProfile buildDefaultConnectionProfile(Settings settings) { + int connectionsPerNodeRecovery = TransportService.CONNECTIONS_PER_NODE_RECOVERY.get(settings); + int connectionsPerNodeBulk = TransportService.CONNECTIONS_PER_NODE_BULK.get(settings); + int connectionsPerNodeReg = TransportService.CONNECTIONS_PER_NODE_REG.get(settings); + int connectionsPerNodeState = TransportService.CONNECTIONS_PER_NODE_STATE.get(settings); + int connectionsPerNodePing = TransportService.CONNECTIONS_PER_NODE_PING.get(settings); + ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); +
builder.setConnectTimeout(TransportService.TCP_CONNECT_TIMEOUT.get(settings)); + builder.setHandshakeTimeout(TransportService.TCP_CONNECT_TIMEOUT.get(settings)); + builder.addConnections(connectionsPerNodeBulk, TransportRequestOptions.Type.BULK); + builder.addConnections(connectionsPerNodePing, TransportRequestOptions.Type.PING); + // if we are not master eligible we don't need a dedicated channel to publish the state + builder.addConnections(DiscoveryNode.isMasterNode(settings) ? connectionsPerNodeState : 0, TransportRequestOptions.Type.STATE); + // if we are not a data-node we don't need any dedicated channels for recovery + builder.addConnections(DiscoveryNode.isDataNode(settings) ? connectionsPerNodeRecovery : 0, TransportRequestOptions.Type.RECOVERY); + builder.addConnections(connectionsPerNodeReg, TransportRequestOptions.Type.REG); + return builder.build(); + } +} diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java b/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java index e14f684bf72ef..b9ed42ca00a56 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java +++ b/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java @@ -26,6 +26,7 @@ import java.util.Collections; import java.util.EnumSet; import java.util.List; +import java.util.Objects; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; @@ -61,14 +62,35 @@ public static ConnectionProfile buildSingleChannelProfile(TransportRequestOption private final TimeValue connectTimeout; private final TimeValue handshakeTimeout; - private ConnectionProfile(List handles, int numConnections, TimeValue connectTimeout, TimeValue handshakeTimeout) - { + private ConnectionProfile(List handles, int numConnections, TimeValue connectTimeout, + TimeValue handshakeTimeout) { this.handles = handles; this.numConnections = numConnections; this.connectTimeout = connectTimeout; this.handshakeTimeout = handshakeTimeout; } + /** + * Takes a {@link ConnectionProfile} and resolves it to a fully specified (i.e., no nulls) profile + */ + public static ConnectionProfile resolveConnectionProfile(@Nullable ConnectionProfile profile, ConnectionProfile fallbackProfile) { + Objects.requireNonNull(fallbackProfile); + if (profile == null) { + return fallbackProfile; + } else if (profile.getConnectTimeout() != null && profile.getHandshakeTimeout() != null) { + return profile; + } else { + ConnectionProfile.Builder builder = new ConnectionProfile.Builder(profile); + if (profile.getConnectTimeout() == null) { + builder.setConnectTimeout(fallbackProfile.getConnectTimeout()); + } + if (profile.getHandshakeTimeout() == null) { + builder.setHandshakeTimeout(fallbackProfile.getHandshakeTimeout()); + } + return builder.build(); + } + } + /** * A builder to build a new {@link ConnectionProfile} */ diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java index 107a4b32d89f9..a12f27c93e3c4 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.transport; +import java.util.function.Supplier; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.ClusterNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -48,9 +49,20 @@ public abstract class RemoteClusterAware extends
AbstractComponent { /** * A list of initial seed nodes to discover eligible nodes from the remote cluster */ - public static final Setting.AffixSetting> REMOTE_CLUSTERS_SEEDS = Setting.affixKeySetting("search.remote.", - "seeds", (key) -> Setting.listSetting(key, Collections.emptyList(), RemoteClusterAware::parseSeedAddress, - Setting.Property.NodeScope, Setting.Property.Dynamic)); + public static final Setting.AffixSetting> REMOTE_CLUSTERS_SEEDS = Setting.affixKeySetting( + "search.remote.", + "seeds", + key -> Setting.listSetting( + key, Collections.emptyList(), + s -> { + // validate seed address + parsePort(s); + return s; + }, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ) + ); public static final char REMOTE_CLUSTER_INDEX_SEPARATOR = ':'; public static final String LOCAL_CLUSTER_GROUP_KEY = ""; @@ -65,18 +77,20 @@ protected RemoteClusterAware(Settings settings) { this.clusterNameResolver = new ClusterNameExpressionResolver(settings); } - protected static Map> buildRemoteClustersSeeds(Settings settings) { - Stream>> allConcreteSettings = REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(settings); + protected static Map>> buildRemoteClustersSeeds(Settings settings) { + Stream>> allConcreteSettings = REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(settings); return allConcreteSettings.collect( Collectors.toMap(REMOTE_CLUSTERS_SEEDS::getNamespace, concreteSetting -> { String clusterName = REMOTE_CLUSTERS_SEEDS.getNamespace(concreteSetting); - List nodes = new ArrayList<>(); - for (InetSocketAddress address : concreteSetting.get(settings)) { - TransportAddress transportAddress = new TransportAddress(address); - DiscoveryNode node = new DiscoveryNode(clusterName + "#" + transportAddress.toString(), - transportAddress, - Version.CURRENT.minimumCompatibilityVersion()); - nodes.add(node); + List addresses = concreteSetting.get(settings); + List> nodes = new ArrayList<>(addresses.size()); + for (String address : addresses) { + nodes.add(() -> { + TransportAddress transportAddress = new TransportAddress(RemoteClusterAware.parseSeedAddress(address)); + return new DiscoveryNode(clusterName + "#" + transportAddress.toString(), + transportAddress, + Version.CURRENT.minimumCompatibilityVersion()); + }); } return nodes; })); @@ -128,7 +142,7 @@ public Map> groupClusterIndices(String[] requestIndices, Pr * Subclasses must implement this to receive information about updated cluster aliases. If the given address list is * empty the cluster alias is unregistered and should be removed. */ - protected abstract void updateRemoteCluster(String clusterAlias, List addresses); + protected abstract void updateRemoteCluster(String clusterAlias, List addresses); /** * Registers this instance to listen to updates on the cluster settings. @@ -138,27 +152,35 @@ public void listenForUpdates(ClusterSettings clusterSettings) { (namespace, value) -> {}); } - private static InetSocketAddress parseSeedAddress(String remoteHost) { - int portSeparator = remoteHost.lastIndexOf(':'); // in case we have a IPv6 address ie. 
[::1]:9300 - if (portSeparator == -1 || portSeparator == remoteHost.length()) { - throw new IllegalArgumentException("remote hosts need to be configured as [host:port], found [" + remoteHost + "] instead"); - } - String host = remoteHost.substring(0, portSeparator); + protected static InetSocketAddress parseSeedAddress(String remoteHost) { + String host = remoteHost.substring(0, indexOfPortSeparator(remoteHost)); InetAddress hostAddress; try { hostAddress = InetAddress.getByName(host); } catch (UnknownHostException e) { throw new IllegalArgumentException("unknown host [" + host + "]", e); } + return new InetSocketAddress(hostAddress, parsePort(remoteHost)); + } + + private static int parsePort(String remoteHost) { try { - int port = Integer.valueOf(remoteHost.substring(portSeparator + 1)); + int port = Integer.valueOf(remoteHost.substring(indexOfPortSeparator(remoteHost) + 1)); if (port <= 0) { throw new IllegalArgumentException("port number must be > 0 but was: [" + port + "]"); } - return new InetSocketAddress(hostAddress, port); + return port; } catch (NumberFormatException e) { - throw new IllegalArgumentException("port must be a number", e); + throw new IllegalArgumentException("failed to parse port", e); + } + } + + private static int indexOfPortSeparator(String remoteHost) { + int portSeparator = remoteHost.lastIndexOf(':'); // in case we have a IPv6 address ie. [::1]:9300 + if (portSeparator == -1 || portSeparator == remoteHost.length()) { + throw new IllegalArgumentException("remote hosts need to be configured as [host:port], found [" + remoteHost + "] instead"); } + return portSeparator; } public static String buildRemoteIndexName(String clusterAlias, String indexName) { diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index 4edfa437eafaa..5bdde79241ee6 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.transport; +import java.util.function.Supplier; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.SetOnce; @@ -88,7 +89,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo private final String clusterAlias; private final int maxNumRemoteConnections; private final Predicate nodePredicate; - private volatile List seedNodes; + private volatile List> seedNodes; private volatile boolean skipUnavailable; private final ConnectHandler connectHandler; private SetOnce remoteClusterName = new SetOnce<>(); @@ -103,7 +104,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo * @param maxNumRemoteConnections the maximum number of connections to the remote cluster * @param nodePredicate a predicate to filter eligible remote nodes to connect to */ - RemoteClusterConnection(Settings settings, String clusterAlias, List seedNodes, + RemoteClusterConnection(Settings settings, String clusterAlias, List> seedNodes, TransportService transportService, int maxNumRemoteConnections, Predicate nodePredicate) { super(settings); this.localClusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); @@ -112,8 +113,8 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo this.nodePredicate = nodePredicate; this.clusterAlias = 
clusterAlias; ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); - builder.setConnectTimeout(TcpTransport.TCP_CONNECT_TIMEOUT.get(settings)); - builder.setHandshakeTimeout(TcpTransport.TCP_CONNECT_TIMEOUT.get(settings)); + builder.setConnectTimeout(TransportService.TCP_CONNECT_TIMEOUT.get(settings)); + builder.setHandshakeTimeout(TransportService.TCP_CONNECT_TIMEOUT.get(settings)); builder.addConnections(6, TransportRequestOptions.Type.REG, TransportRequestOptions.Type.PING); // TODO make this configurable? builder.addConnections(0, // we don't want this to be used for anything else but search TransportRequestOptions.Type.BULK, @@ -131,7 +132,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo /** * Updates the list of seed nodes for this cluster connection */ - synchronized void updateSeedNodes(List seedNodes, ActionListener connectListener) { + synchronized void updateSeedNodes(List> seedNodes, ActionListener connectListener) { this.seedNodes = Collections.unmodifiableList(new ArrayList<>(seedNodes)); connectHandler.connect(connectListener); } @@ -293,11 +294,26 @@ public void sendRequest(long requestId, String action, TransportRequest request, TransportActionProxy.wrapRequest(targetNode, request), options); } + @Override + public boolean sendPing() { + return proxyConnection.sendPing(); + } + @Override public void close() { assert false: "proxy connections must not be closed"; } + @Override + public void addCloseListener(ActionListener listener) { + proxyConnection.addCloseListener(listener); + } + + @Override + public boolean isClosed() { + return proxyConnection.isClosed(); + } + @Override public Version getVersion() { return proxyConnection.getVersion(); @@ -445,7 +461,7 @@ protected void doRun() { }); } - void collectRemoteNodes(Iterator seedNodes, + private void collectRemoteNodes(Iterator> seedNodes, final TransportService transportService, ActionListener listener) { if (Thread.currentThread().isInterrupted()) { listener.onFailure(new InterruptedException("remote connect thread got interrupted")); @@ -453,7 +469,7 @@ void collectRemoteNodes(Iterator seedNodes, try { if (seedNodes.hasNext()) { cancellableThreads.executeIO(() -> { - final DiscoveryNode seedNode = seedNodes.next(); + final DiscoveryNode seedNode = seedNodes.next().get(); final TransportService.HandshakeResponse handshakeResponse; Transport.Connection connection = transportService.openConnection(seedNode, ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG, null, null)); @@ -543,11 +559,11 @@ private class SniffClusterStateResponseHandler implements TransportResponseHandl private final TransportService transportService; private final Transport.Connection connection; private final ActionListener listener; - private final Iterator seedNodes; + private final Iterator> seedNodes; private final CancellableThreads cancellableThreads; SniffClusterStateResponseHandler(TransportService transportService, Transport.Connection connection, - ActionListener listener, Iterator seedNodes, + ActionListener listener, Iterator> seedNodes, CancellableThreads cancellableThreads) { this.transportService = transportService; this.connection = connection; @@ -679,7 +695,8 @@ public void handleResponse(NodesInfoResponse response) { } } RemoteConnectionInfo remoteConnectionInfo = new RemoteConnectionInfo(clusterAlias, - seedNodes.stream().map(DiscoveryNode::getAddress).collect(Collectors.toList()), new ArrayList<>(httpAddresses), + seedNodes.stream().map(sup -> 
sup.get().getAddress()).collect(Collectors.toList()), + new ArrayList<>(httpAddresses), maxNumRemoteConnections, connectedNodes.size(), RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings), skipUnavailable); listener.onResponse(remoteConnectionInfo); @@ -696,7 +713,6 @@ public String executor() { } }); } - } int getNumNodesConnected() { diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index fb67faeb66a97..a259c6eb4027a 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.transport; +import java.util.function.Supplier; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; @@ -41,7 +42,6 @@ import java.io.Closeable; import java.io.IOException; -import java.net.InetSocketAddress; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -116,7 +116,8 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl * @param seeds a cluster alias to discovery node mapping representing the remote clusters seeds nodes * @param connectionListener a listener invoked once every configured cluster has been connected to */ - private synchronized void updateRemoteClusters(Map> seeds, ActionListener connectionListener) { + private synchronized void updateRemoteClusters(Map>> seeds, + ActionListener connectionListener) { if (seeds.containsKey(LOCAL_CLUSTER_GROUP_KEY)) { throw new IllegalArgumentException("remote clusters must not have the empty string as its key"); } @@ -126,7 +127,7 @@ private synchronized void updateRemoteClusters(Map> } else { CountDown countDown = new CountDown(seeds.size()); remoteClusters.putAll(this.remoteClusters); - for (Map.Entry> entry : seeds.entrySet()) { + for (Map.Entry>> entry : seeds.entrySet()) { RemoteClusterConnection remote = this.remoteClusters.get(entry.getKey()); if (entry.getValue().isEmpty()) { // with no seed nodes we just remove the connection try { @@ -311,16 +312,17 @@ synchronized void updateSkipUnavailable(String clusterAlias, Boolean skipUnavail } } - protected void updateRemoteCluster(String clusterAlias, List addresses) { + @Override + protected void updateRemoteCluster(String clusterAlias, List addresses) { updateRemoteCluster(clusterAlias, addresses, ActionListener.wrap((x) -> {}, (x) -> {})); } void updateRemoteCluster( final String clusterAlias, - final List addresses, + final List addresses, final ActionListener connectionListener) { - final List nodes = addresses.stream().map(address -> { - final TransportAddress transportAddress = new TransportAddress(address); + final List> nodes = addresses.stream().>map(address -> () -> { + final TransportAddress transportAddress = new TransportAddress(RemoteClusterAware.parseSeedAddress(address)); final String id = clusterAlias + "#" + transportAddress.toString(); final Version version = Version.CURRENT.minimumCompatibilityVersion(); return new DiscoveryNode(id, transportAddress, version); @@ -335,7 +337,7 @@ void updateRemoteCluster( void initializeRemoteClusters() { final TimeValue timeValue = REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings); final PlainActionFuture future = new PlainActionFuture<>(); - Map> seeds = RemoteClusterAware.buildRemoteClustersSeeds(settings); + Map>> 
seeds = RemoteClusterAware.buildRemoteClustersSeeds(settings); updateRemoteClusters(seeds, future); try { future.get(timeValue.millis(), TimeUnit.MILLISECONDS); diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index 118987e2fa099..a8dbb176cef43 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -21,8 +21,6 @@ import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.hppc.IntSet; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.common.Booleans; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionFuture; @@ -30,8 +28,7 @@ import org.elasticsearch.action.NotifyOnceListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.CheckedBiConsumer; -import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesArray; @@ -49,7 +46,6 @@ import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.network.NetworkAddress; @@ -63,11 +59,9 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.concurrent.AbstractLifecycleRunnable; import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; -import org.elasticsearch.common.util.concurrent.KeyedLock; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.rest.RestStatus; @@ -89,7 +83,6 @@ import java.util.EnumMap; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; @@ -140,18 +133,6 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements // the scheduled internal ping interval setting, defaults to disabled (-1) public static final Setting PING_SCHEDULE = timeSetting("transport.ping_schedule", TimeValue.timeValueSeconds(-1), Setting.Property.NodeScope); - public static final Setting CONNECTIONS_PER_NODE_RECOVERY = - intSetting("transport.connections_per_node.recovery", 2, 1, Setting.Property.NodeScope); - public static final Setting CONNECTIONS_PER_NODE_BULK = - intSetting("transport.connections_per_node.bulk", 3, 1, Setting.Property.NodeScope); - public static final Setting CONNECTIONS_PER_NODE_REG = - intSetting("transport.connections_per_node.reg", 6, 1, Setting.Property.NodeScope); - public static final Setting CONNECTIONS_PER_NODE_STATE = - intSetting("transport.connections_per_node.state", 1, 1, 
Setting.Property.NodeScope); - public static final Setting CONNECTIONS_PER_NODE_PING = - intSetting("transport.connections_per_node.ping", 1, 1, Setting.Property.NodeScope); - public static final Setting TCP_CONNECT_TIMEOUT = - timeSetting("transport.tcp.connect_timeout", NetworkService.TCP_CONNECT_TIMEOUT, Setting.Property.NodeScope); public static final Setting TCP_NO_DELAY = boolSetting("transport.tcp_no_delay", NetworkService.TCP_NO_DELAY, Setting.Property.NodeScope); public static final Setting TCP_KEEP_ALIVE = @@ -159,11 +140,9 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements public static final Setting TCP_REUSE_ADDRESS = boolSetting("transport.tcp.reuse_address", NetworkService.TCP_REUSE_ADDRESS, Setting.Property.NodeScope); public static final Setting TCP_SEND_BUFFER_SIZE = - Setting.byteSizeSetting("transport.tcp.send_buffer_size", NetworkService.TCP_SEND_BUFFER_SIZE, - Setting.Property.NodeScope); + Setting.byteSizeSetting("transport.tcp.send_buffer_size", NetworkService.TCP_SEND_BUFFER_SIZE, Setting.Property.NodeScope); public static final Setting TCP_RECEIVE_BUFFER_SIZE = - Setting.byteSizeSetting("transport.tcp.receive_buffer_size", NetworkService.TCP_RECEIVE_BUFFER_SIZE, - Setting.Property.NodeScope); + Setting.byteSizeSetting("transport.tcp.receive_buffer_size", NetworkService.TCP_RECEIVE_BUFFER_SIZE, Setting.Property.NodeScope); public static final Setting.AffixSetting TCP_NO_DELAY_PROFILE = affixKeySetting("transport.profiles.", "tcp_no_delay", @@ -186,17 +165,16 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements public static final Setting.AffixSetting PUBLISH_PORT_PROFILE = affixKeySetting("transport.profiles.", "publish_port", key -> intSetting(key, -1, -1, Setting.Property.NodeScope)); - private static final long NINETY_PER_HEAP_SIZE = (long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.9); public static final int PING_DATA_SIZE = -1; + protected final CounterMetric successfulPings = new CounterMetric(); + protected final CounterMetric failedPings = new CounterMetric(); + private static final long NINETY_PER_HEAP_SIZE = (long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.9); public static final String FEATURE_PREFIX = "transport.features"; public static final Setting DEFAULT_FEATURES_SETTING = Setting.groupSetting(FEATURE_PREFIX + ".", Setting.Property.NodeScope); private final String[] features; private final CircuitBreakerService circuitBreakerService; - // package visibility for tests - protected final ScheduledPing scheduledPing; - private final TimeValue pingSchedule; protected final ThreadPool threadPool; private final BigArrays bigArrays; protected final NetworkService networkService; @@ -205,21 +183,19 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements private final DelegatingTransportConnectionListener transportListener = new DelegatingTransportConnectionListener(); private final ConcurrentMap profileBoundAddresses = newConcurrentMap(); + // node id to actual channel - private final ConcurrentMap connectedNodes = newConcurrentMap(); private final Map> serverChannels = newConcurrentMap(); private final Set acceptedChannels = Collections.newSetFromMap(new ConcurrentHashMap<>()); - private final KeyedLock connectionLock = new KeyedLock<>(); private final NamedWriteableRegistry namedWriteableRegistry; // this lock is here to make sure we close this transport and disconnect all the client nodes - // connections while no connect operations is 
going on... (this might help with 100% CPU when stopping the transport?) + // connections while no connect operations is going on private final ReadWriteLock closeLock = new ReentrantReadWriteLock(); protected final boolean compress; private volatile BoundTransportAddress boundAddress; private final String transportName; - protected final ConnectionProfile defaultConnectionProfile; private final ConcurrentMap pendingHandshakes = new ConcurrentHashMap<>(); private final CounterMetric numHandshakes = new CounterMetric(); @@ -229,6 +205,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements private final MeanMetric transmittedBytesMetric = new MeanMetric(); private volatile Map requestHandlers = Collections.emptyMap(); private final ResponseHandlers responseHandlers = new ResponseHandlers(); + private final BytesReference pingMessage; public TcpTransport(String transportName, Settings settings, ThreadPool threadPool, BigArrays bigArrays, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, @@ -238,13 +215,10 @@ public TcpTransport(String transportName, Settings settings, ThreadPool threadPo this.threadPool = threadPool; this.bigArrays = bigArrays; this.circuitBreakerService = circuitBreakerService; - this.scheduledPing = new ScheduledPing(); - this.pingSchedule = PING_SCHEDULE.get(settings); this.namedWriteableRegistry = namedWriteableRegistry; this.compress = Transport.TRANSPORT_TCP_COMPRESS.get(settings); this.networkService = networkService; this.transportName = transportName; - defaultConnectionProfile = buildDefaultConnectionProfile(settings); final Settings defaultFeatures = DEFAULT_FEATURES_SETTING.get(settings); if (defaultFeatures == null) { this.features = new String[0]; @@ -257,32 +231,19 @@ public TcpTransport(String transportName, Settings settings, ThreadPool threadPo // use a sorted set to present the features in a consistent order this.features = new TreeSet<>(defaultFeatures.names()).toArray(new String[defaultFeatures.names().size()]); } - } - static ConnectionProfile buildDefaultConnectionProfile(Settings settings) { - int connectionsPerNodeRecovery = CONNECTIONS_PER_NODE_RECOVERY.get(settings); - int connectionsPerNodeBulk = CONNECTIONS_PER_NODE_BULK.get(settings); - int connectionsPerNodeReg = CONNECTIONS_PER_NODE_REG.get(settings); - int connectionsPerNodeState = CONNECTIONS_PER_NODE_STATE.get(settings); - int connectionsPerNodePing = CONNECTIONS_PER_NODE_PING.get(settings); - ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); - builder.setConnectTimeout(TCP_CONNECT_TIMEOUT.get(settings)); - builder.setHandshakeTimeout(TCP_CONNECT_TIMEOUT.get(settings)); - builder.addConnections(connectionsPerNodeBulk, TransportRequestOptions.Type.BULK); - builder.addConnections(connectionsPerNodePing, TransportRequestOptions.Type.PING); - // if we are not master eligible we don't need a dedicated channel to publish the state - builder.addConnections(DiscoveryNode.isMasterNode(settings) ? connectionsPerNodeState : 0, TransportRequestOptions.Type.STATE); - // if we are not a data-node we don't need any dedicated channels for recovery - builder.addConnections(DiscoveryNode.isDataNode(settings) ? 
connectionsPerNodeRecovery : 0, TransportRequestOptions.Type.RECOVERY); - builder.addConnections(connectionsPerNodeReg, TransportRequestOptions.Type.REG); - return builder.build(); + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeByte((byte) 'E'); + out.writeByte((byte) 'S'); + out.writeInt(TcpTransport.PING_DATA_SIZE); + pingMessage = out.bytes(); + } catch (IOException e) { + throw new AssertionError(e.getMessage(), e); // won't happen + } } @Override protected void doStart() { - if (pingSchedule.millis() > 0) { - threadPool.schedule(pingSchedule, ThreadPool.Names.GENERIC, scheduledPing); - } } @Override @@ -344,93 +305,12 @@ public String executor() { } } - public class ScheduledPing extends AbstractLifecycleRunnable { - - /** - * The magic number (must be lower than 0) for a ping message. This is handled - * specifically in {@link TcpTransport#validateMessageHeader}. - */ - private final BytesReference pingHeader; - final CounterMetric successfulPings = new CounterMetric(); - final CounterMetric failedPings = new CounterMetric(); - - public ScheduledPing() { - super(lifecycle, logger); - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.writeByte((byte) 'E'); - out.writeByte((byte) 'S'); - out.writeInt(PING_DATA_SIZE); - pingHeader = out.bytes(); - } catch (IOException e) { - throw new IllegalStateException(e.getMessage(), e); // won't happen - } - } - - @Override - protected void doRunInLifecycle() throws Exception { - for (Map.Entry entry : connectedNodes.entrySet()) { - DiscoveryNode node = entry.getKey(); - NodeChannels channels = entry.getValue(); - for (TcpChannel channel : channels.getChannels()) { - internalSendMessage(channel, pingHeader, new SendMetricListener(pingHeader.length()) { - @Override - protected void innerInnerOnResponse(Void v) { - successfulPings.inc(); - } - - @Override - protected void innerOnFailure(Exception e) { - if (channel.isOpen()) { - logger.debug(() -> new ParameterizedMessage("[{}] failed to send ping transport message", node), e); - failedPings.inc(); - } else { - logger.trace(() -> - new ParameterizedMessage("[{}] failed to send ping transport message (channel closed)", node), e); - } - - } - }); - } - } - } - - public long getSuccessfulPings() { - return successfulPings.count(); - } - - public long getFailedPings() { - return failedPings.count(); - } - - @Override - protected void onAfterInLifecycle() { - try { - threadPool.schedule(pingSchedule, ThreadPool.Names.GENERIC, this); - } catch (EsRejectedExecutionException ex) { - if (ex.isExecutorShutdown()) { - logger.debug("couldn't schedule new ping execution, executor is shutting down", ex); - } else { - throw ex; - } - } - } - - @Override - public void onFailure(Exception e) { - if (lifecycle.stoppedOrClosed()) { - logger.trace("failed to send ping transport message", e); - } else { - logger.warn("failed to send ping transport message", e); - } - } - } - - public final class NodeChannels implements Connection { + public final class NodeChannels extends CloseableConnection { private final Map typeMapping; private final List channels; private final DiscoveryNode node; - private final AtomicBoolean closed = new AtomicBoolean(false); private final Version version; + private final AtomicBoolean isClosing = new AtomicBoolean(false); NodeChannels(DiscoveryNode node, List channels, ConnectionProfile connectionProfile, Version handshakeVersion) { this.node = node; @@ -462,13 +342,38 @@ public TcpChannel channel(TransportRequestOptions.Type type) { return 
connectionTypeHandle.getChannel(channels); } - public boolean allChannelsOpen() { + boolean allChannelsOpen() { return channels.stream().allMatch(TcpChannel::isOpen); } + @Override + public boolean sendPing() { + for (TcpChannel channel : channels) { + internalSendMessage(channel, pingMessage, new SendMetricListener(pingMessage.length()) { + @Override + protected void innerInnerOnResponse(Void v) { + successfulPings.inc(); + } + + @Override + protected void innerOnFailure(Exception e) { + if (channel.isOpen()) { + logger.debug(() -> new ParameterizedMessage("[{}] failed to send ping transport message", node), e); + failedPings.inc(); + } else { + logger.trace(() -> + new ParameterizedMessage("[{}] failed to send ping transport message (channel closed)", node), e); + } + + } + }); + } + return true; + } + @Override public void close() { - if (closed.compareAndSet(false, true)) { + if (isClosing.compareAndSet(false, true)) { try { if (lifecycle.stopped()) { /* We set SO_LINGER timeout to 0 to ensure that when we shutdown the node we don't @@ -491,7 +396,8 @@ public void close() { boolean block = lifecycle.stopped() && Transports.isTransportThread(Thread.currentThread()) == false; TcpChannel.closeChannels(channels, block); } finally { - transportListener.onConnectionClosed(this); + // Call the super method to trigger listeners + super.close(); } } } @@ -504,118 +410,29 @@ public DiscoveryNode getNode() { @Override public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException { - if (closed.get()) { + if (isClosing.get()) { throw new NodeNotConnectedException(node, "connection already closed"); } TcpChannel channel = channel(options.type()); sendRequestToChannel(this.node, channel, requestId, action, request, options, getVersion(), (byte) 0); } - - boolean isClosed() { - return closed.get(); - } } - @Override - public boolean nodeConnected(DiscoveryNode node) { - return connectedNodes.containsKey(node); + // This allows transport implementations to potentially override specific connection profiles. This + // primarily exists for the test implementations. + protected ConnectionProfile maybeOverrideConnectionProfile(ConnectionProfile connectionProfile) { + return connectionProfile; } @Override - public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfile, - CheckedBiConsumer connectionValidator) - throws ConnectTransportException { - connectionProfile = resolveConnectionProfile(connectionProfile); - if (node == null) { - throw new ConnectTransportException(null, "can't connect to a null node"); - } - closeLock.readLock().lock(); // ensure we don't open connections while we are closing - try { - ensureOpen(); - try (Releasable ignored = connectionLock.acquire(node.getId())) { - NodeChannels nodeChannels = connectedNodes.get(node); - if (nodeChannels != null) { - return; - } - boolean success = false; - try { - nodeChannels = openConnection(node, connectionProfile); - connectionValidator.accept(nodeChannels, connectionProfile); - // we acquire a connection lock, so no way there is an existing connection - connectedNodes.put(node, nodeChannels); - if (logger.isDebugEnabled()) { - logger.debug("connected to node [{}]", node); - } - try { - transportListener.onNodeConnected(node); - } finally { - if (nodeChannels.isClosed()) { - // we got closed concurrently due to a disconnect or some other event on the channel. 
- // the close callback will close the NodeChannel instance first and then try to remove - // the connection from the connected nodes. It will NOT acquire the connectionLock for - // the node to prevent any blocking calls on network threads. Yet, we still establish a happens - // before relationship to the connectedNodes.put since we check if we can remove the - // (DiscoveryNode, NodeChannels) tuple from the map after we closed. Here we check if it's closed an if so we - // try to remove it first either way one of the two wins even if the callback has run before we even added the - // tuple to the map since in that case we remove it here again - if (connectedNodes.remove(node, nodeChannels)) { - transportListener.onNodeDisconnected(node); - } - throw new NodeNotConnectedException(node, "connection concurrently closed"); - } - } - success = true; - } catch (ConnectTransportException e) { - throw e; - } catch (Exception e) { - throw new ConnectTransportException(node, "general node connection failure", e); - } finally { - if (success == false) { // close the connection if there is a failure - logger.trace(() -> new ParameterizedMessage("failed to connect to [{}], cleaning dangling connections", node)); - IOUtils.closeWhileHandlingException(nodeChannels); - } - } - } - } finally { - closeLock.readLock().unlock(); - } - } - - /** - * takes a {@link ConnectionProfile} that have been passed as a parameter to the public methods - * and resolves it to a fully specified (i.e., no nulls) profile - */ - static ConnectionProfile resolveConnectionProfile(@Nullable ConnectionProfile connectionProfile, - ConnectionProfile defaultConnectionProfile) { - Objects.requireNonNull(defaultConnectionProfile); - if (connectionProfile == null) { - return defaultConnectionProfile; - } else if (connectionProfile.getConnectTimeout() != null && connectionProfile.getHandshakeTimeout() != null) { - return connectionProfile; - } else { - ConnectionProfile.Builder builder = new ConnectionProfile.Builder(connectionProfile); - if (connectionProfile.getConnectTimeout() == null) { - builder.setConnectTimeout(defaultConnectionProfile.getConnectTimeout()); - } - if (connectionProfile.getHandshakeTimeout() == null) { - builder.setHandshakeTimeout(defaultConnectionProfile.getHandshakeTimeout()); - } - return builder.build(); - } - } - - protected ConnectionProfile resolveConnectionProfile(ConnectionProfile connectionProfile) { - return resolveConnectionProfile(connectionProfile, defaultConnectionProfile); - } - - @Override - public final NodeChannels openConnection(DiscoveryNode node, ConnectionProfile connectionProfile) { + public NodeChannels openConnection(DiscoveryNode node, ConnectionProfile connectionProfile) { + Objects.requireNonNull(connectionProfile, "connection profile cannot be null"); if (node == null) { throw new ConnectTransportException(null, "can't open connection to a null node"); } boolean success = false; NodeChannels nodeChannels = null; - connectionProfile = resolveConnectionProfile(connectionProfile); + connectionProfile = maybeOverrideConnectionProfile(connectionProfile); closeLock.readLock().lock(); // ensure we don't open connections while we are closing try { ensureOpen(); @@ -661,16 +478,16 @@ public final NodeChannels openConnection(DiscoveryNode node, ConnectionProfile c // At this point we should construct the connection, notify the transport service, and attach close listeners to the // underlying channels. 
nodeChannels = new NodeChannels(node, channels, connectionProfile, version); - transportListener.onConnectionOpened(nodeChannels); final NodeChannels finalNodeChannels = nodeChannels; - final AtomicBoolean runOnce = new AtomicBoolean(false); + try { + transportListener.onConnectionOpened(nodeChannels); + } finally { + nodeChannels.addCloseListener(ActionListener.wrap(() -> transportListener.onConnectionClosed(finalNodeChannels))); + } + Consumer onClose = c -> { assert c.isOpen() == false : "channel is still open when onClose is called"; - // we only need to disconnect from the nodes once since all other channels - // will also try to run this we protect it from running multiple times. - if (runOnce.compareAndSet(false, true)) { - disconnectFromNodeCloseAndNotify(node, finalNodeChannels); - } + finalNodeChannels.close(); }; nodeChannels.channels.forEach(ch -> ch.addCloseListener(ActionListener.wrap(() -> onClose.accept(ch)))); @@ -696,46 +513,6 @@ public final NodeChannels openConnection(DiscoveryNode node, ConnectionProfile c } } - private void disconnectFromNodeCloseAndNotify(DiscoveryNode node, NodeChannels nodeChannels) { - assert nodeChannels != null : "nodeChannels must not be null"; - try { - IOUtils.closeWhileHandlingException(nodeChannels); - } finally { - if (closeLock.readLock().tryLock()) { - try { - if (connectedNodes.remove(node, nodeChannels)) { - transportListener.onNodeDisconnected(node); - } - } finally { - closeLock.readLock().unlock(); - } - } - } - } - - @Override - public NodeChannels getConnection(DiscoveryNode node) { - NodeChannels nodeChannels = connectedNodes.get(node); - if (nodeChannels == null) { - throw new NodeNotConnectedException(node, "Node not connected"); - } - return nodeChannels; - } - - @Override - public void disconnectFromNode(DiscoveryNode node) { - closeLock.readLock().lock(); - NodeChannels nodeChannels = null; - try (Releasable ignored = connectionLock.acquire(node.getId())) { - nodeChannels = connectedNodes.remove(node); - } finally { - closeLock.readLock().unlock(); - if (nodeChannels != null) { // if we found it and removed it we close and notify - IOUtils.closeWhileHandlingException(nodeChannels, () -> transportListener.onNodeDisconnected(node)); - } - } - } - protected Version getCurrentVersion() { // this is just for tests to mock stuff like the nodes version - tests can override this internally return Version.CURRENT; @@ -980,19 +757,6 @@ protected final void doStop() { TcpChannel.closeChannels(new ArrayList<>(acceptedChannels), true); acceptedChannels.clear(); - - // we are holding a write lock so nobody modifies the connectedNodes / openConnections map - it's safe to first close - // all instances and then clear them maps - Iterator> iterator = connectedNodes.entrySet().iterator(); - while (iterator.hasNext()) { - Map.Entry next = iterator.next(); - try { - IOUtils.closeWhileHandlingException(next.getValue()); - transportListener.onNodeDisconnected(next.getKey()); - } finally { - iterator.remove(); - } - } stopInternal(); } finally { closeLock.writeLock().unlock(); @@ -1098,8 +862,7 @@ public boolean canCompress(TransportRequest request) { private void sendRequestToChannel(final DiscoveryNode node, final TcpChannel channel, final long requestId, final String action, final TransportRequest request, TransportRequestOptions options, Version channelVersion, - byte status) throws IOException, - TransportException { + byte status) throws IOException, TransportException { if (compress) { options = 
TransportRequestOptions.builder(options).withCompress(true).build(); } diff --git a/server/src/main/java/org/elasticsearch/transport/Transport.java b/server/src/main/java/org/elasticsearch/transport/Transport.java index 74235479657bf..9538119f43b8a 100644 --- a/server/src/main/java/org/elasticsearch/transport/Transport.java +++ b/server/src/main/java/org/elasticsearch/transport/Transport.java @@ -20,8 +20,8 @@ package org.elasticsearch.transport; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.component.LifecycleComponent; @@ -85,23 +85,6 @@ public interface Transport extends LifecycleComponent { */ TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException; - /** - * Returns {@code true} if the node is connected. - */ - boolean nodeConnected(DiscoveryNode node); - - /** - * Connects to a node with the given connection profile. If the node is already connected this method has no effect. - * Once a connection is successfully established, it can be validated before being exposed. - */ - void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfile, - CheckedBiConsumer<Connection, ConnectionProfile, IOException> connectionValidator) throws ConnectTransportException; - - /** - * Disconnects from the given node; if not connected, this will do nothing. - */ - void disconnectFromNode(DiscoveryNode node); - /** * Returns a list of all local addresses for this transport */ @@ -112,23 +95,10 @@ default CircuitBreaker getInFlightRequestBreaker() { } /** - * Returns a connection for the given node if the node is connected. - * Connections returned from this method must not be closed. The lifecycle of this connection is maintained by the Transport - * implementation. - * - * @throws NodeNotConnectedException if the node is not connected - * @see #connectToNode(DiscoveryNode, ConnectionProfile, CheckedBiConsumer) - */ - Connection getConnection(DiscoveryNode node); - - /** - * Opens a new connection to the given node and returns it. In contrast to - * {@link #connectToNode(DiscoveryNode, ConnectionProfile, CheckedBiConsumer)} the returned connection is not managed by + * Opens a new connection to the given node and returns it. The returned connection is not managed by * the transport implementation. This connection must be closed once it's not needed anymore. - * This connection type can be used to execute a handshake between two nodes before the node will be published via - * {@link #connectToNode(DiscoveryNode, ConnectionProfile, CheckedBiConsumer)}. */ - Connection openConnection(DiscoveryNode node, ConnectionProfile profile) throws IOException; + Connection openConnection(DiscoveryNode node, ConnectionProfile profile); TransportStats getStats(); @@ -154,6 +124,21 @@ interface Connection extends Closeable { void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException; + default boolean sendPing() { + return false; + } + + /** + * The listener's {@link ActionListener#onResponse(Object)} method will be called when this + * connection is closed. No implementations currently throw an exception during close, so + * {@link ActionListener#onFailure(Exception)} will not be called.
+ * + * @param listener to be called + */ + void addCloseListener(ActionListener listener); + + boolean isClosed(); + /** * Returns the version of the node this connection was established with. */ @@ -168,6 +153,9 @@ default Version getVersion() { default Object getCacheKey() { return this; } + + @Override + void close(); } /** diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index d887519ee968b..60144e47a6573 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -21,11 +21,11 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Nullable; @@ -36,6 +36,7 @@ import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -43,10 +44,12 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.tasks.TaskManager; @@ -70,15 +73,30 @@ import java.util.function.Supplier; import static java.util.Collections.emptyList; +import static org.elasticsearch.common.settings.Setting.intSetting; import static org.elasticsearch.common.settings.Setting.listSetting; +import static org.elasticsearch.common.settings.Setting.timeSetting; public class TransportService extends AbstractLifecycleComponent implements TransportConnectionListener { + public static final Setting CONNECTIONS_PER_NODE_RECOVERY = + intSetting("transport.connections_per_node.recovery", 2, 1, Setting.Property.NodeScope); + public static final Setting CONNECTIONS_PER_NODE_BULK = + intSetting("transport.connections_per_node.bulk", 3, 1, Setting.Property.NodeScope); + public static final Setting CONNECTIONS_PER_NODE_REG = + intSetting("transport.connections_per_node.reg", 6, 1, Setting.Property.NodeScope); + public static final Setting CONNECTIONS_PER_NODE_STATE = + intSetting("transport.connections_per_node.state", 1, 1, Setting.Property.NodeScope); + public static final Setting CONNECTIONS_PER_NODE_PING = + 
intSetting("transport.connections_per_node.ping", 1, 1, Setting.Property.NodeScope); + public static final Setting TCP_CONNECT_TIMEOUT = + timeSetting("transport.tcp.connect_timeout", NetworkService.TCP_CONNECT_TIMEOUT, Setting.Property.NodeScope); public static final String DIRECT_RESPONSE_PROFILE = ".direct"; public static final String HANDSHAKE_ACTION_NAME = "internal:transport/handshake"; private final CountDownLatch blockIncomingRequestsLatch = new CountDownLatch(1); protected final Transport transport; + protected final ConnectionManager connectionManager; protected final ThreadPool threadPool; protected final ClusterName clusterName; protected final TaskManager taskManager; @@ -131,6 +149,15 @@ public void sendRequest(long requestId, String action, TransportRequest request, sendLocalRequest(requestId, action, request, options); } + @Override + public void addCloseListener(ActionListener listener) { + } + + @Override + public boolean isClosed() { + return false; + } + @Override public void close() { } @@ -145,6 +172,13 @@ public void close() { public TransportService(Settings settings, Transport transport, ThreadPool threadPool, TransportInterceptor transportInterceptor, Function localNodeFactory, @Nullable ClusterSettings clusterSettings, Set taskHeaders) { + this(settings, transport, threadPool, transportInterceptor, localNodeFactory, clusterSettings, taskHeaders, + new ConnectionManager(settings, transport, threadPool)); + } + + public TransportService(Settings settings, Transport transport, ThreadPool threadPool, TransportInterceptor transportInterceptor, + Function localNodeFactory, @Nullable ClusterSettings clusterSettings, + Set taskHeaders, ConnectionManager connectionManager) { super(settings); // The only time we do not want to validate node connections is when this is a transport client using the simple node sampler this.validateConnections = TransportClient.CLIENT_TYPE.equals(settings.get(Client.CLIENT_TYPE_SETTING_S.getKey())) == false || @@ -152,6 +186,7 @@ public TransportService(Settings settings, Transport transport, ThreadPool threa this.transport = transport; this.threadPool = threadPool; this.localNodeFactory = localNodeFactory; + this.connectionManager = connectionManager; this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); setTracerLogInclude(TRACE_LOG_INCLUDE_SETTING.get(settings)); setTracerLogExclude(TRACE_LOG_EXCLUDE_SETTING.get(settings)); @@ -231,6 +266,7 @@ protected void doStart() { @Override protected void doStop() { try { + connectionManager.close(); transport.stop(); } finally { // in case the transport is not connected to our local node (thus cleaned on node disconnect) @@ -304,7 +340,7 @@ public List getLocalAddresses() { * Returns true iff the given node is already connected. */ public boolean nodeConnected(DiscoveryNode node) { - return isLocalNode(node) || transport.nodeConnected(node); + return isLocalNode(node) || connectionManager.nodeConnected(node); } /** @@ -326,7 +362,8 @@ public void connectToNode(final DiscoveryNode node, ConnectionProfile connection if (isLocalNode(node)) { return; } - transport.connectToNode(node, connectionProfile, (newConnection, actualProfile) -> { + + connectionManager.connectToNode(node, connectionProfile, (newConnection, actualProfile) -> { // We don't validate cluster names to allow for CCS connections. 
final DiscoveryNode remote = handshake(newConnection, actualProfile.getHandshakeTimeout().millis(), cn -> true).discoveryNode; if (validateConnections && node.equals(remote) == false) { @@ -339,13 +376,13 @@ public void connectToNode(final DiscoveryNode node, ConnectionProfile connection * Establishes and returns a new connection to the given node. The connection is NOT maintained by this service; it's the caller's * responsibility to close the connection once it goes out of scope. * @param node the node to connect to - * @param profile the connection profile to use + * @param connectionProfile the connection profile to use */ - public Transport.Connection openConnection(final DiscoveryNode node, ConnectionProfile profile) throws IOException { + public Transport.Connection openConnection(final DiscoveryNode node, ConnectionProfile connectionProfile) throws IOException { if (isLocalNode(node)) { return localNodeConnection; } else { - return transport.openConnection(node, profile); + return connectionManager.openConnection(node, connectionProfile); } } @@ -377,12 +414,11 @@ public DiscoveryNode handshake( * @param handshakeTimeout handshake timeout * @param clusterNamePredicate cluster name validation predicate * @return the handshake response - * @throws ConnectTransportException if the connection failed * @throws IllegalStateException if the handshake failed */ public HandshakeResponse handshake( final Transport.Connection connection, - final long handshakeTimeout, Predicate<ClusterName> clusterNamePredicate) throws ConnectTransportException { + final long handshakeTimeout, Predicate<ClusterName> clusterNamePredicate) { final HandshakeResponse response; final DiscoveryNode node = connection.getNode(); try { @@ -409,6 +445,10 @@ public HandshakeResponse newInstance() { return response; } + public ConnectionManager getConnectionManager() { + return connectionManager; + } + static class HandshakeRequest extends TransportRequest { public static final HandshakeRequest INSTANCE = new HandshakeRequest(); @@ -461,15 +501,17 @@ public void disconnectFromNode(DiscoveryNode node) { if (isLocalNode(node)) { return; } - transport.disconnectFromNode(node); + connectionManager.disconnectFromNode(node); } public void addConnectionListener(TransportConnectionListener listener) { transport.addConnectionListener(listener); + connectionManager.addListener(listener); } public void removeConnectionListener(TransportConnectionListener listener) { transport.removeConnectionListener(listener); + connectionManager.removeListener(listener); } public <T extends TransportResponse> TransportFuture<T> submitRequest(DiscoveryNode node, String action, TransportRequest request, @@ -532,7 +574,7 @@ public Transport.Connection getConnection(DiscoveryNode node) { if (isLocalNode(node)) { return localNodeConnection; } else { - return transport.getConnection(node); + return connectionManager.getConnection(node); } } diff --git a/server/src/test/java/org/apache/lucene/grouping/CollapsingTopDocsCollectorTests.java b/server/src/test/java/org/apache/lucene/grouping/CollapsingTopDocsCollectorTests.java index 0290a6c5d100b..3760d5f6881b0 100644 --- a/server/src/test/java/org/apache/lucene/grouping/CollapsingTopDocsCollectorTests.java +++ b/server/src/test/java/org/apache/lucene/grouping/CollapsingTopDocsCollectorTests.java @@ -280,7 +280,7 @@ public void testCollapseFloat() throws Exception { CollapsingDocValuesProducer producer = new CollapsingDocValuesProducer() { @Override public Float randomGroup(int maxGroup) { - return new Float(randomIntBetween(0, maxGroup - 1)); + return
Float.valueOf(randomIntBetween(0, maxGroup - 1)); } @Override @@ -308,7 +308,7 @@ public void testCollapseDouble() throws Exception { CollapsingDocValuesProducer producer = new CollapsingDocValuesProducer() { @Override public Double randomGroup(int maxGroup) { - return new Double(randomIntBetween(0, maxGroup - 1)); + return Double.valueOf(randomIntBetween(0, maxGroup - 1)); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java index 731397e043918..aefcebde9223a 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java @@ -63,7 +63,7 @@ public void setUp() throws Exception { clusterService = getInstanceFromNode(ClusterService.class); indicesService = getInstanceFromNode(IndicesService.class); CapturingTransport capturingTransport = new CapturingTransport(); - transportService = new TransportService(clusterService.getSettings(), capturingTransport, threadPool, + transportService = capturingTransport.createCapturingTransportService(clusterService.getSettings(), threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); @@ -81,12 +81,10 @@ public void tearDown() throws Exception { public void testIncludeDefaults() { GetIndexRequest defaultsRequest = new GetIndexRequest().indices(indexName).includeDefaults(true); getIndexAction.execute(null, defaultsRequest, ActionListener.wrap( - defaultsResponse -> { - assertNotNull( - "index.refresh_interval should be set as we are including defaults", - defaultsResponse.getSetting(indexName, "index.refresh_interval") - ); - }, exception -> { + defaultsResponse -> assertNotNull( + "index.refresh_interval should be set as we are including defaults", + defaultsResponse.getSetting(indexName, "index.refresh_interval") + ), exception -> { throw new AssertionError(exception); }) ); @@ -95,12 +93,10 @@ public void testIncludeDefaults() { public void testDoNotIncludeDefaults() { GetIndexRequest noDefaultsRequest = new GetIndexRequest().indices(indexName); getIndexAction.execute(null, noDefaultsRequest, ActionListener.wrap( - noDefaultsResponse -> { - assertNull( - "index.refresh_interval should be null as it was never set", - noDefaultsResponse.getSetting(indexName, "index.refresh_interval") - ); - }, exception -> { + noDefaultsResponse -> assertNull( + "index.refresh_interval should be null as it was never set", + noDefaultsResponse.getSetting(indexName, "index.refresh_interval") + ), exception -> { throw new AssertionError(exception); }) ); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsActionTests.java index 11f0188c8c0b0..03ccebba10dbd 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsActionTests.java @@ -75,7 +75,7 @@ public void setUp() throws Exception { threadPool = new TestThreadPool("GetSettingsActionTests"); clusterService = createClusterService(threadPool); CapturingTransport capturingTransport = new CapturingTransport(); - transportService = new 
TransportService(clusterService.getSettings(), capturingTransport, threadPool, + transportService = capturingTransport.createCapturingTransportService(clusterService.getSettings(), threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java index 892f2cbb61b15..d71458ddb056b 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java @@ -22,20 +22,25 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateApplier; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexSettings; @@ -77,6 +82,9 @@ public class TransportBulkActionIngestTests extends ESTestCase { */ private static final String WITH_DEFAULT_PIPELINE = "index_with_default_pipeline"; + private static final Settings SETTINGS = + Settings.builder().put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), true).build(); + /** Services needed by bulk action */ TransportService transportService; ClusterService clusterService; @@ -112,25 +120,42 @@ public class TransportBulkActionIngestTests extends ESTestCase { /** A subclass of the real bulk action to allow skipping real bulk indexing, and marking when it would have happened. 
*/ class TestTransportBulkAction extends TransportBulkAction { boolean isExecuted = false; // set when the "real" bulk execution happens + + boolean needToCheck; // pluggable return value for `needToCheck` + + boolean indexCreated = true; // If set to false, will be set to true by call to createIndex + TestTransportBulkAction() { - super(Settings.EMPTY, null, transportService, clusterService, ingestService, - null, null, new ActionFilters(Collections.emptySet()), null, null); + super(SETTINGS, null, transportService, clusterService, ingestService, + null, null, new ActionFilters(Collections.emptySet()), null, + new AutoCreateIndex( + SETTINGS, new ClusterSettings(SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + new IndexNameExpressionResolver(SETTINGS) + ) + ); } @Override protected boolean needToCheck() { - return false; + return needToCheck; } @Override void executeBulk(Task task, final BulkRequest bulkRequest, final long startTimeNanos, final ActionListener listener, final AtomicArray responses, Map indicesThatCannotBeCreated) { + assertTrue(indexCreated); isExecuted = true; } + + @Override + void createIndex(String index, TimeValue timeout, ActionListener listener) { + indexCreated = true; + listener.onResponse(null); + } } class TestSingleItemBulkWriteAction extends TransportSingleItemBulkWriteAction { TestSingleItemBulkWriteAction(TestTransportBulkAction bulkAction) { - super(Settings.EMPTY, IndexAction.NAME, TransportBulkActionIngestTests.this.transportService, + super(SETTINGS, IndexAction.NAME, TransportBulkActionIngestTests.this.transportService, TransportBulkActionIngestTests.this.clusterService, null, null, null, new ActionFilters(Collections.emptySet()), null, IndexRequest::new, IndexRequest::new, ThreadPool.Names.INDEX, bulkAction, null); @@ -162,7 +187,7 @@ public void setupAction() { when(nodes.getIngestNodes()).thenReturn(ingestNodes); ClusterState state = mock(ClusterState.class); when(state.getNodes()).thenReturn(nodes); - when(state.getMetaData()).thenReturn(MetaData.builder().indices(ImmutableOpenMap.builder() + MetaData metaData = MetaData.builder().indices(ImmutableOpenMap.builder() .putAll( Collections.singletonMap( WITH_DEFAULT_PIPELINE, @@ -170,7 +195,9 @@ public void setupAction() { settings(Version.CURRENT).put(IndexSettings.DEFAULT_PIPELINE.getKey(), "default_pipeline") .build() ).numberOfShards(1).numberOfReplicas(1).build())) - .build()).build()); + .build()).build(); + when(state.getMetaData()).thenReturn(metaData); + when(state.metaData()).thenReturn(metaData); when(clusterService.state()).thenReturn(state); doAnswer(invocation -> { ClusterChangedEvent event = mock(ClusterChangedEvent.class); @@ -408,4 +435,36 @@ public void testUseDefaultPipeline() throws Exception { verifyZeroInteractions(transportService); } + public void testCreateIndexBeforeRunPipeline() throws Exception { + Exception exception = new Exception("fake exception"); + IndexRequest indexRequest = new IndexRequest("missing_index", "type", "id"); + indexRequest.setPipeline("testpipeline"); + indexRequest.source(Collections.emptyMap()); + AtomicBoolean responseCalled = new AtomicBoolean(false); + AtomicBoolean failureCalled = new AtomicBoolean(false); + action.needToCheck = true; + action.indexCreated = false; + singleItemBulkWriteAction.execute(null, indexRequest, ActionListener.wrap( + response -> responseCalled.set(true), + e -> { + assertThat(e, sameInstance(exception)); + failureCalled.set(true); + })); + + // check failure works, and passes through to the listener + 
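// (at this point nothing has run: the captured ingest completion handler decides the outcome;
// invoking it with an exception should fail the listener, while invoking it with null should let
// the bulk action proceed to the index-creation and execution checks below)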
assertFalse(action.isExecuted); // haven't executed yet + assertFalse(responseCalled.get()); + assertFalse(failureCalled.get()); + verify(executionService).executeBulkRequest(bulkDocsItr.capture(), failureHandler.capture(), completionHandler.capture()); + completionHandler.getValue().accept(exception); + assertTrue(failureCalled.get()); + + // now check success + indexRequest.setPipeline(IngestService.NOOP_PIPELINE_NAME); // this is done by the real pipeline execution service when processing + completionHandler.getValue().accept(null); + assertTrue(action.isExecuted); + assertFalse(responseCalled.get()); // listener would only be called by real index action, not our mocked one + verifyZeroInteractions(transportService); + } + } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java index 3bd66af1bab05..a1abd4d61f7fd 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java @@ -80,8 +80,8 @@ public void setUp() throws Exception { threadPool = new TestThreadPool("TransportBulkActionTookTests"); clusterService = createClusterService(threadPool); CapturingTransport capturingTransport = new CapturingTransport(); - transportService = new TransportService(clusterService.getSettings(), capturingTransport, threadPool, - TransportService.NOOP_TRANSPORT_INTERCEPTOR, + transportService = capturingTransport.createCapturingTransportService(clusterService.getSettings(), threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java index af8289f0c45b1..af4346baa949a 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java @@ -91,8 +91,8 @@ public void tearDown() throws Exception { private TransportBulkAction createAction(boolean controlled, AtomicLong expected) { CapturingTransport capturingTransport = new CapturingTransport(); - TransportService transportService = new TransportService(clusterService.getSettings(), capturingTransport, threadPool, - TransportService.NOOP_TRANSPORT_INTERCEPTOR, + TransportService transportService = capturingTransport.createCapturingTransportService(clusterService.getSettings(), threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java index 82e0fcaf5d667..5bea70bef17e0 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java @@ -442,7 +442,17 @@ public void sendRequest(long requestId, String action, TransportRequest request, } @Override - public void close() throws IOException { + public void addCloseListener(ActionListener listener) { + + } + + 
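// Transport.Connection now also declares addCloseListener and isClosed, and close() no longer
// throws IOException; a test stub like this one can safely no-op the listener registration
// because the connection is never expected to be closed (close() below deliberately throws).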
@Override + public boolean isClosed() { + return false; + } + + @Override + public void close() { throw new UnsupportedOperationException(); } } diff --git a/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java b/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java index 6a7d443553888..9bed5e7901704 100644 --- a/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java @@ -191,7 +191,7 @@ public void setUp() throws Exception { super.setUp(); transport = new CapturingTransport(); clusterService = createClusterService(THREAD_POOL); - final TransportService transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL, + TransportService transportService = transport.createCapturingTransportService(clusterService.getSettings(), THREAD_POOL, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java index b27bc9ad79432..f592b3d803af5 100644 --- a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java @@ -87,8 +87,8 @@ public void setUp() throws Exception { super.setUp(); transport = new CapturingTransport(); clusterService = createClusterService(threadPool); - transportService = new TransportService(clusterService.getSettings(), transport, threadPool, - TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, Collections.emptySet()); + transportService = transport.createCapturingTransportService(clusterService.getSettings(), threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); localNode = new DiscoveryNode("local_node", buildNewFakeTransportAddress(), Collections.emptyMap(), diff --git a/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java b/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java index 60a46876a7126..3c5d3d3e1d2d6 100644 --- a/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java @@ -180,7 +180,7 @@ public void setUp() throws Exception { super.setUp(); transport = new CapturingTransport(); clusterService = createClusterService(THREAD_POOL); - transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL, + transportService = transport.createCapturingTransportService(clusterService.getSettings(), THREAD_POOL, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java 
b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 6756c00ea84aa..7f1b4adf8df1e 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -163,7 +163,7 @@ public void setUp() throws Exception { super.setUp(); transport = new CapturingTransport(); clusterService = createClusterService(threadPool); - transportService = new TransportService(clusterService.getSettings(), transport, threadPool, + transportService = transport.createCapturingTransportService(clusterService.getSettings(), threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java index d305630f83ed7..9a571f1e9d8eb 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -255,8 +255,8 @@ public void testDocumentFailureInShardOperationOnReplica() throws Exception { public void testReplicaProxy() throws InterruptedException, ExecutionException { CapturingTransport transport = new CapturingTransport(); - TransportService transportService = new TransportService(clusterService.getSettings(), transport, threadPool, - TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, Collections.emptySet()); + TransportService transportService = transport.createCapturingTransportService(clusterService.getSettings(), threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); ShardStateAction shardStateAction = new ShardStateAction(Settings.EMPTY, clusterService, transportService, null, null, threadPool); diff --git a/server/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java b/server/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java index 8db45cc5508ef..0505143592497 100644 --- a/server/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java @@ -23,7 +23,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.IndicesRequest; -import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; @@ -144,16 +143,15 @@ public void setUp() throws Exception { super.setUp(); transport = new CapturingTransport(); clusterService = createClusterService(THREAD_POOL); - transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL, - TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), 
null, Collections.emptySet() - ); + transportService = transport.createCapturingTransportService(clusterService.getSettings(), THREAD_POOL, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); action = new TestTransportInstanceSingleOperationAction( Settings.EMPTY, "indices:admin/test", transportService, - new ActionFilters(new HashSet()), + new ActionFilters(new HashSet<>()), new MyResolver(), Request::new ); diff --git a/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java b/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java index 669a678b77e31..513f07b733cda 100644 --- a/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java +++ b/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java @@ -19,6 +19,7 @@ package org.elasticsearch.client.transport; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.liveness.LivenessResponse; import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; @@ -26,13 +27,13 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.transport.CloseableConnection; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.ConnectionProfile; import org.elasticsearch.transport.RequestHandlerRegistry; @@ -43,9 +44,9 @@ import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportResponseHandler; +import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportStats; -import java.io.IOException; import java.net.UnknownHostException; import java.util.Collections; import java.util.Map; @@ -59,7 +60,7 @@ abstract class FailAndRetryMockTransport imp private final Random random; private final ClusterName clusterName; private volatile Map requestHandlers = Collections.emptyMap(); - final Object requestHandlerMutex = new Object(); + private final Object requestHandlerMutex = new Object(); private final ResponseHandlers responseHandlers = new ResponseHandlers(); private TransportConnectionListener listener; @@ -78,8 +79,9 @@ abstract class FailAndRetryMockTransport imp protected abstract ClusterState getMockClusterState(DiscoveryNode node); @Override - public Connection getConnection(DiscoveryNode node) { - return new Connection() { + public Connection openConnection(DiscoveryNode node, ConnectionProfile profile) { + return new CloseableConnection() { + @Override public DiscoveryNode getNode() { return node; @@ -87,19 +89,22 @@ public DiscoveryNode getNode() { @Override public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) - throws IOException, TransportException { - 
+ throws TransportException { //we make sure that nodes get added to the connected ones when calling addTransportAddress, by returning proper nodes info if (connectMode) { if (TransportLivenessAction.NAME.equals(action)) { TransportResponseHandler transportResponseHandler = responseHandlers.onResponseReceived(requestId, listener); - transportResponseHandler.handleResponse(new LivenessResponse(ClusterName.CLUSTER_NAME_SETTING. - getDefault(Settings.EMPTY), - node)); + ClusterName clusterName = ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY); + transportResponseHandler.handleResponse(new LivenessResponse(clusterName, node)); } else if (ClusterStateAction.NAME.equals(action)) { TransportResponseHandler transportResponseHandler = responseHandlers.onResponseReceived(requestId, listener); ClusterState clusterState = getMockClusterState(node); transportResponseHandler.handleResponse(new ClusterStateResponse(clusterName, clusterState, 0L)); + } else if (TransportService.HANDSHAKE_ACTION_NAME.equals(action)) { + TransportResponseHandler transportResponseHandler = responseHandlers.onResponseReceived(requestId, listener); + Version version = node.getVersion(); + transportResponseHandler.handleResponse(new TransportService.HandshakeResponse(node, clusterName, version)); + } else { throw new UnsupportedOperationException("Mock transport does not understand action " + action); } @@ -129,19 +134,9 @@ public void sendRequest(long requestId, String action, TransportRequest request, } } } - - @Override - public void close() throws IOException { - - } }; } - @Override - public Connection openConnection(DiscoveryNode node, ConnectionProfile profile) throws IOException { - return getConnection(node); - } - protected abstract Response newResponse(); public void endConnectMode() { @@ -175,23 +170,6 @@ public TransportAddress[] addressesFromString(String address, int perAddressLimi throw new UnsupportedOperationException(); } - @Override - public boolean nodeConnected(DiscoveryNode node) { - return false; - } - - @Override - public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfile, - CheckedBiConsumer connectionValidator) - throws ConnectTransportException { - - } - - @Override - public void disconnectFromNode(DiscoveryNode node) { - - } - @Override public Lifecycle.State lifecycleState() { return null; diff --git a/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java b/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java index 88db9f18d5c79..5fa0903febc11 100644 --- a/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java @@ -40,7 +40,6 @@ import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.ConnectionProfile; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; @@ -50,7 +49,6 @@ import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportResponseHandler; -import org.elasticsearch.transport.TransportService; import org.hamcrest.CustomMatcher; import java.io.Closeable; @@ -82,7 +80,7 @@ public class 
TransportClientNodesServiceTests extends ESTestCase { private static class TestIteration implements Closeable { private final ThreadPool threadPool; private final FailAndRetryMockTransport transport; - private final TransportService transportService; + private final MockTransportService transportService; private final TransportClientNodesService transportClientNodesService; private final int listNodesCount; private final int sniffNodesCount; @@ -142,7 +140,8 @@ protected ClusterState getMockClusterState(DiscoveryNode node) { return ClusterState.builder(clusterName).nodes(TestIteration.this.nodeMap.get(node.getAddress())).build(); } }; - transportService = new TransportService(settings, transport, threadPool, new TransportInterceptor() { + + transportService = new MockTransportService(settings, transport, threadPool, new TransportInterceptor() { @Override public AsyncSender interceptSender(AsyncSender sender) { return new AsyncSender() { @@ -164,6 +163,11 @@ public void sendRequest(Transport.Connection conne assert addr == null : "boundAddress: " + addr; return DiscoveryNode.createLocal(settings, buildNewFakeTransportAddress(), UUIDs.randomBase64UUID()); }, null, Collections.emptySet()); + transportService.addNodeConnectedBehavior((connectionManager, discoveryNode) -> false); + transportService.addGetConnectionBehavior((connectionManager, discoveryNode) -> { + // The FailAndRetryTransport does not use the connection profile + return transport.openConnection(discoveryNode, null); + }); transportService.start(); transportService.acceptIncomingRequests(); transportClientNodesService = @@ -355,25 +359,15 @@ public void testSniffNodesSamplerClosesConnections() throws Exception { .build(); try (MockTransportService clientService = createNewService(clientSettings, Version.CURRENT, threadPool, null)) { - final List establishedConnections = new CopyOnWriteArrayList<>(); - final List reusedConnections = new CopyOnWriteArrayList<>(); - - clientService.addDelegate(remoteService, new MockTransportService.DelegateTransport(clientService.original()) { - @Override - public Connection openConnection(DiscoveryNode node, ConnectionProfile profile) throws IOException { - MockConnection connection = new MockConnection(super.openConnection(node, profile)); - establishedConnections.add(connection); - return connection; - } + final List establishedConnections = new CopyOnWriteArrayList<>(); - @Override - public Connection getConnection(DiscoveryNode node) { - MockConnection connection = new MockConnection(super.getConnection(node)); - reusedConnections.add(connection); - return connection; - } + clientService.addConnectBehavior(remoteService, (transport, discoveryNode, profile) -> { + Transport.Connection connection = transport.openConnection(discoveryNode, profile); + establishedConnections.add(connection); + return connection; }); + clientService.start(); clientService.acceptIncomingRequests(); @@ -381,27 +375,26 @@ public Connection getConnection(DiscoveryNode node) { new TransportClientNodesService(clientSettings, clientService, threadPool, (a, b) -> {})) { assertEquals(0, transportClientNodesService.connectedNodes().size()); assertEquals(0, establishedConnections.size()); - assertEquals(0, reusedConnections.size()); transportClientNodesService.addTransportAddresses(remoteService.getLocalDiscoNode().getAddress()); assertEquals(1, transportClientNodesService.connectedNodes().size()); - assertClosedConnections(establishedConnections, 1); + assertEquals(1, clientService.connectionManager().connectedNodeCount()); 
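// doSample() re-pings the node over a short-lived connection, so the managed connection count
// should remain at 1 while the temporary connections show up in establishedConnections.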
transportClientNodesService.doSample(); - assertClosedConnections(establishedConnections, 2); - assertOpenConnections(reusedConnections, 1); + assertEquals(1, clientService.connectionManager().connectedNodeCount()); + establishedConnections.clear(); handler.blockRequest(); Thread thread = new Thread(transportClientNodesService::doSample); thread.start(); - assertBusy(() -> assertEquals(3, establishedConnections.size())); - assertFalse("Temporary ping connection must be opened", establishedConnections.get(2).isClosed()); + assertBusy(() -> assertTrue(establishedConnections.size() >= 1)); + assertFalse("Temporary ping connection must be opened", establishedConnections.get(0).isClosed()); handler.releaseRequest(); thread.join(); - assertClosedConnections(establishedConnections, 3); + assertTrue(establishedConnections.get(0).isClosed()); } } } finally { @@ -409,56 +402,6 @@ public Connection getConnection(DiscoveryNode node) { } } - private void assertClosedConnections(final List connections, final int size) { - assertEquals("Expecting " + size + " closed connections but got " + connections.size(), size, connections.size()); - connections.forEach(c -> assertConnection(c, true)); - } - - private void assertOpenConnections(final List connections, final int size) { - assertEquals("Expecting " + size + " open connections but got " + connections.size(), size, connections.size()); - connections.forEach(c -> assertConnection(c, false)); - } - - private static void assertConnection(final MockConnection connection, final boolean closed) { - assertEquals("Connection [" + connection + "] must be " + (closed ? "closed" : "open"), closed, connection.isClosed()); - } - - class MockConnection implements Transport.Connection { - private final AtomicBoolean closed = new AtomicBoolean(false); - private final Transport.Connection connection; - - private MockConnection(Transport.Connection connection) { - this.connection = connection; - } - - @Override - public DiscoveryNode getNode() { - return connection.getNode(); - } - - @Override - public Version getVersion() { - return connection.getVersion(); - } - - @Override - public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) - throws IOException, TransportException { - connection.sendRequest(requestId, action, request, options); - } - - @Override - public void close() throws IOException { - if (closed.compareAndSet(false, true)) { - connection.close(); - } - } - - boolean isClosed() { - return closed.get(); - } - } - class MockHandler implements TransportRequestHandler { private final AtomicBoolean block = new AtomicBoolean(false); private final CountDownLatch release = new CountDownLatch(1); diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java b/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java index d7d232a08d43c..b17a0cc5418e9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java @@ -46,12 +46,9 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; import org.hamcrest.Matchers; -import java.io.IOException; import java.util.Arrays; import java.util.Collection; import 
java.util.List; @@ -116,23 +113,23 @@ public void blockActions(String... actions) { @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() - // manual collection or upon cluster forming. - .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2) - .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), "1s") - .build(); + // manual collection or upon cluster forming. + .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2) + .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), "1s") + .build(); } @Override protected Collection> nodePlugins() { return Arrays.asList(TestPlugin.class, - MockTransportService.TestPlugin.class); + MockTransportService.TestPlugin.class); } public void testClusterInfoServiceCollectsInformation() throws Exception { internalCluster().startNodes(2); assertAcked(prepareCreate("test").setSettings(Settings.builder() - .put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), 0) - .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE).build())); + .put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), 0) + .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE).build())); ensureGreen("test"); InternalTestCluster internalTestCluster = internalCluster(); // Get the cluster info service on the master node @@ -177,8 +174,8 @@ public void testClusterInfoServiceCollectsInformation() throws Exception { public void testClusterInfoServiceInformationClearOnError() throws InterruptedException, ExecutionException { internalCluster().startNodes(2, - // manually control publishing - Settings.builder().put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.getKey(), "60m").build()); + // manually control publishing + Settings.builder().put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.getKey(), "60m").build()); prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)).get(); ensureGreen("test"); InternalTestCluster internalTestCluster = internalCluster(); @@ -196,19 +193,16 @@ public void testClusterInfoServiceInformationClearOnError() throws InterruptedEx final Set blockedActions = newHashSet(NodesStatsAction.NAME, NodesStatsAction.NAME + "[n]", IndicesStatsAction.NAME, IndicesStatsAction.NAME + "[n]"); // drop all outgoing stats requests to force a timeout. 
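// addSendBehavior supersedes the old DelegateTransport override: the behavior receives
// (connection, requestId, action, request, options) and must forward whatever it does not drop.
// A minimal hedged sketch (shouldDrop is a hypothetical helper, targetService stands in for the
// node's TransportService):
//
//   mockTransportService.addSendBehavior(targetService, (connection, requestId, action, request, options) -> {
//       if (shouldDrop(action) == false) {
//           connection.sendRequest(requestId, action, request, options);
//       }
//   });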
for (DiscoveryNode node : internalTestCluster.clusterService().state().getNodes()) { - mockTransportService.addDelegate(internalTestCluster.getInstance(TransportService.class, node.getName()), new MockTransportService.DelegateTransport(mockTransportService.original()) { - @Override - protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, - TransportRequestOptions options) throws IOException { + mockTransportService.addSendBehavior(internalTestCluster.getInstance(TransportService.class, node.getName()), + (connection, requestId, action, request, options) -> { if (blockedActions.contains(action)) { if (timeout.get()) { logger.info("dropping [{}] to [{}]", action, node); return; } } - super.sendRequest(connection, requestId, action, request, options); - } - }); + connection.sendRequest(requestId, action, request, options); + }); } // timeouts shouldn't clear the info diff --git a/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java index eb93dad5db8a0..92c1016d3e615 100644 --- a/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java @@ -20,16 +20,16 @@ package org.elasticsearch.cluster; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.component.LifecycleListener; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -39,6 +39,7 @@ import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportConnectionListener; import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportInterceptor; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; @@ -46,8 +47,6 @@ import org.junit.After; import org.junit.Before; -import java.io.IOException; -import java.net.UnknownHostException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -56,6 +55,8 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; +import java.util.function.Function; +import java.util.function.Predicate; import static org.hamcrest.Matchers.equalTo; @@ -70,7 +71,7 @@ private List generateNodes() { for (int i = randomIntBetween(20, 50); i > 0; i--) { Set roles = new HashSet<>(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values()))); nodes.add(new DiscoveryNode("node_" + i, "" + i, buildNewFakeTransportAddress(), Collections.emptyMap(), - roles, Version.CURRENT)); + roles, Version.CURRENT)); } return nodes; } @@ -121,7 +122,7 @@ public void testReconnect() { for (int i = 0; i < 3; i++) { // simulate disconnects for (DiscoveryNode 
node : randomSubsetOf(nodes)) { - transport.disconnectFromNode(node); + transportService.disconnectFromNode(node); } service.new ConnectionChecker().run(); } @@ -134,18 +135,12 @@ public void testReconnect() { private void assertConnectedExactlyToNodes(ClusterState state) { assertConnected(state.nodes()); - assertThat(transport.connectedNodes.size(), equalTo(state.nodes().getSize())); + assertThat(transportService.getConnectionManager().connectedNodeCount(), equalTo(state.nodes().getSize())); } private void assertConnected(Iterable nodes) { for (DiscoveryNode node : nodes) { - assertTrue("not connected to " + node, transport.connectedNodes.contains(node)); - } - } - - private void assertNotConnected(Iterable nodes) { - for (DiscoveryNode node : nodes) { - assertFalse("still connected to " + node, transport.connectedNodes.contains(node)); + assertTrue("not connected to " + node, transportService.nodeConnected(node)); } } @@ -155,7 +150,8 @@ public void setUp() throws Exception { super.setUp(); this.threadPool = new TestThreadPool(getClass().getName()); this.transport = new MockTransport(); - transportService = new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + transportService = new NoHandshakeTransportService(Settings.EMPTY, transport, threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> DiscoveryNode.createLocal(Settings.EMPTY, buildNewFakeTransportAddress(), UUIDs.randomBase64UUID()), null, Collections.emptySet()); transportService.start(); @@ -171,11 +167,29 @@ public void tearDown() throws Exception { super.tearDown(); } - final class MockTransport implements Transport { - Set connectedNodes = ConcurrentCollections.newConcurrentSet(); - volatile boolean randomConnectionExceptions = false; + private final class NoHandshakeTransportService extends TransportService { + + private NoHandshakeTransportService(Settings settings, + Transport transport, + ThreadPool threadPool, + TransportInterceptor transportInterceptor, + Function localNodeFactory, + ClusterSettings clusterSettings, + Set taskHeaders) { + super(settings, transport, threadPool, transportInterceptor, localNodeFactory, clusterSettings, taskHeaders); + } + + @Override + public HandshakeResponse handshake(Transport.Connection connection, long timeout, Predicate clusterNamePredicate) { + return new HandshakeResponse(connection.getNode(), new ClusterName(""), Version.CURRENT); + } + } + + private final class MockTransport implements Transport { private ResponseHandlers responseHandlers = new ResponseHandlers(); - private TransportConnectionListener listener = new TransportConnectionListener() {}; + private volatile boolean randomConnectionExceptions = false; + private TransportConnectionListener listener = new TransportConnectionListener() { + }; @Override public void registerRequestHandler(RequestHandlerRegistry reg) { @@ -207,37 +221,19 @@ public Map profileBoundAddresses() { } @Override - public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException { + public TransportAddress[] addressesFromString(String address, int perAddressLimit) { return new TransportAddress[0]; } @Override - public boolean nodeConnected(DiscoveryNode node) { - return connectedNodes.contains(node); - } - - @Override - public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfile, - CheckedBiConsumer connectionValidator) - throws ConnectTransportException { + public Connection openConnection(DiscoveryNode node, 
ConnectionProfile connectionProfile) { if (connectionProfile == null) { - if (connectedNodes.contains(node) == false && randomConnectionExceptions && randomBoolean()) { + if (randomConnectionExceptions && randomBoolean()) { throw new ConnectTransportException(node, "simulated"); } - connectedNodes.add(node); listener.onNodeConnected(node); } - } - - @Override - public void disconnectFromNode(DiscoveryNode node) { - connectedNodes.remove(node); - listener.onNodeDisconnected(node); - } - - @Override - public Connection getConnection(DiscoveryNode node) { - return new Connection() { + Connection connection = new Connection() { @Override public DiscoveryNode getNode() { return node; @@ -249,16 +245,21 @@ public void sendRequest(long requestId, String action, TransportRequest request, } + @Override + public void addCloseListener(ActionListener listener) { + + } + @Override public void close() { } - }; - } - @Override - public Connection openConnection(DiscoveryNode node, ConnectionProfile profile) { - Connection connection = getConnection(node); + @Override + public boolean isClosed() { + return false; + } + }; listener.onConnectionOpened(connection); return connection; } @@ -282,13 +283,16 @@ public void removeLifecycleListener(LifecycleListener listener) { } @Override - public void start() {} + public void start() { + } @Override - public void stop() {} + public void stop() { + } @Override - public void close() {} + public void close() { + } @Override public TransportStats getStats() { diff --git a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java index 1d78cdeb98374..64fa51d159a54 100644 --- a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java @@ -124,8 +124,8 @@ public void setUp() throws Exception { super.setUp(); this.transport = new CapturingTransport(); clusterService = createClusterService(THREAD_POOL); - transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL, - TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, Collections.emptySet()); + transportService = transport.createCapturingTransportService(clusterService.getSettings(), THREAD_POOL, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); shardStateAction = new TestShardStateAction(Settings.EMPTY, clusterService, transportService, null, null); diff --git a/server/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java b/server/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java index 0f5f4870ae1bb..32a8c06bb7020 100644 --- a/server/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java @@ -94,7 +94,8 @@ public static void setupThreadPool() { public void setUp() throws Exception { super.setUp(); clusterService = createClusterService(threadPool); - transportService = new TransportService(clusterService.getSettings(), new CapturingTransport(), threadPool, + CapturingTransport transport = new CapturingTransport(); + transportService = transport.createCapturingTransportService(clusterService.getSettings(), threadPool, 
TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java b/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java index 79b6aa5f60436..c17ec9380ed5b 100644 --- a/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java +++ b/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java @@ -58,7 +58,7 @@ public void testParseFromXContent() throws IOException { Float floatRep = randomFloat(); Number value = intValue; if (randomBoolean()) { - value = new Float(floatRep += intValue); + value = Float.valueOf(floatRep += intValue); } XContentBuilder json = jsonBuilder().startObject() .field(Fuzziness.X_FIELD_NAME, randomBoolean() ? value.toString() : value) diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java index cc6152d98962f..a0fdcbf51ca1d 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java @@ -337,7 +337,7 @@ public void testInheritContext() throws InterruptedException { final CountDownLatch executed = new CountDownLatch(1); threadContext.putHeader("foo", "bar"); - final Integer one = new Integer(1); + final Integer one = Integer.valueOf(1); threadContext.putTransient("foo", one); EsThreadPoolExecutor executor = EsExecutors.newFixed(getName(), pool, queue, EsExecutors.daemonThreadFactory("dummy"), threadContext); diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java index e71efa46424b2..a0a92cad7a851 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java @@ -42,7 +42,7 @@ public void testStashContext() { threadContext.putHeader("foo", "bar"); threadContext.putTransient("ctx.foo", 1); assertEquals("bar", threadContext.getHeader("foo")); - assertEquals(new Integer(1), threadContext.getTransient("ctx.foo")); + assertEquals(Integer.valueOf(1), threadContext.getTransient("ctx.foo")); assertEquals("1", threadContext.getHeader("default")); try (ThreadContext.StoredContext ctx = threadContext.stashContext()) { assertNull(threadContext.getHeader("foo")); @@ -61,7 +61,7 @@ public void testStashAndMerge() { threadContext.putHeader("foo", "bar"); threadContext.putTransient("ctx.foo", 1); assertEquals("bar", threadContext.getHeader("foo")); - assertEquals(new Integer(1), threadContext.getTransient("ctx.foo")); + assertEquals(Integer.valueOf(1), threadContext.getTransient("ctx.foo")); assertEquals("1", threadContext.getHeader("default")); HashMap toMerge = new HashMap<>(); toMerge.put("foo", "baz"); diff --git a/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java b/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java index 50dfd92d82e15..0f3288b1973e5 100644 --- a/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java +++ b/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java @@ -44,7 +44,7 @@ import 
org.elasticsearch.test.disruption.ServiceDisruptionScheme;
 import org.elasticsearch.test.disruption.SlowClusterStateProcessing;
 import org.elasticsearch.test.transport.MockTransportService;
-import org.elasticsearch.transport.TcpTransport;
+import org.elasticsearch.transport.TransportService;
 import org.junit.Before;
 import java.util.Arrays;
@@ -139,7 +139,7 @@ List<String> startCluster(int numberOfNodes, int minimumMasterNode, @Nullable in
         .put(FaultDetection.PING_RETRIES_SETTING.getKey(), "1") // for hitting simulated network failures quickly
         .put("discovery.zen.join_timeout", "10s") // still long enough to induce failures, but not so long that the test times out
         .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") // <-- for hitting simulated network failures quickly
-        .put(TcpTransport.TCP_CONNECT_TIMEOUT.getKey(), "10s") // Network delay disruption waits for the min between this
+        .put(TransportService.TCP_CONNECT_TIMEOUT.getKey(), "10s") // Network delay disruption waits for the min between this
         // value and the time of disruption and does not recover immediately
         // when disruption is stopped. We should make sure we recover faster
         // than the default of 30s, causing ensureGreen and friends to time out
diff --git a/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java
index f688291237813..4d190921f2da9 100644
--- a/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java
+++ b/server/src/test/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java
@@ -37,13 +37,9 @@
 import org.elasticsearch.test.disruption.SlowClusterStateProcessing;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.test.transport.MockTransportService;
-import org.elasticsearch.transport.ConnectionProfile;
 import org.elasticsearch.transport.Transport;
-import org.elasticsearch.transport.TransportRequest;
-import org.elasticsearch.transport.TransportRequestOptions;
 import org.elasticsearch.transport.TransportService;
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashSet;
@@ -175,24 +171,15 @@ public void testClusterJoinDespiteOfPublishingIssues() throws Exception {
         logger.info("allowing requests from non master [{}] to master [{}], waiting for two join request", nonMasterNode, masterNode);
         final CountDownLatch countDownLatch = new CountDownLatch(2);
-        nonMasterTransportService.addDelegate(masterTranspotService, new MockTransportService.DelegateTransport(nonMasterTransportService
-            .original()) {
-            @Override
-            protected void sendRequest(Transport.Connection connection, long requestId, String action, TransportRequest request,
-                                       TransportRequestOptions options) throws IOException {
-                if (action.equals(MembershipAction.DISCOVERY_JOIN_ACTION_NAME)) {
-                    countDownLatch.countDown();
-                }
-                super.sendRequest(connection, requestId, action, request, options);
+        nonMasterTransportService.addSendBehavior(masterTransportService, (connection, requestId, action, request, options) -> {
+            if (action.equals(MembershipAction.DISCOVERY_JOIN_ACTION_NAME)) {
+                countDownLatch.countDown();
             }
-
-            @Override
-            public Transport.Connection openConnection(DiscoveryNode node, ConnectionProfile profile) throws IOException {
-                return super.openConnection(node, profile);
-            }
-
+            connection.sendRequest(requestId, action, request, options);
         });
+        nonMasterTransportService.addConnectBehavior(masterTransportService, Transport::openConnection);
+
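Every hunk in this change follows the same migration, so a minimal sketch of the new pattern may help while reading. A send behavior receives the live connection, so forwarding is an explicit connection.sendRequest(...) call instead of a super.sendRequest(...) delegation, and dropping a request is simply returning without forwarding. The service variables and the action name below are illustrative, not taken from this change:

    // Hypothetical usage of MockTransportService.addSendBehavior; the action name is made up.
    senderMockTransportService.addSendBehavior(receiverTransportService,
        (connection, requestId, action, request, options) -> {
            if ("internal:example/action".equals(action)) {
                return; // drop the request instead of forwarding it
            }
            connection.sendRequest(requestId, action, request, options); // forward everything else
        });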
countDownLatch.await(); logger.info("waiting for cluster to reform"); diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java index eef926a1e1238..4ab738f5c7bc3 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -29,7 +28,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode.Role; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.network.NetworkAddress; @@ -42,14 +40,13 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.ConnectTransportException; -import org.elasticsearch.transport.ConnectionProfile; import org.elasticsearch.transport.MockTcpTransport; import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.Transport; @@ -152,14 +149,7 @@ public void testSimplePings() throws IOException, InterruptedException, Executio new NoneCircuitBreakerService(), new NamedWriteableRegistry(Collections.emptyList()), networkService, - v) { - @Override - public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfile, - CheckedBiConsumer connectionValidator) - throws ConnectTransportException { - throw new AssertionError("zen pings should never connect to node (got [" + node + "])"); - } - }; + v); NetworkHandle handleA = startServices(settings, threadPool, "UZP_A", Version.CURRENT, supplier); closeables.push(handleA.transportService); diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 4bc71980407c1..cafc457581c7a 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -4465,13 +4465,18 @@ public void testAcquireIndexCommit() throws Exception { public void testCleanUpCommitsWhenGlobalCheckpointAdvanced() throws Exception { IOUtils.close(engine, store); - store = createStore(); + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", + Settings.builder().put(defaultSettings.getSettings()) + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), -1) + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), -1).build()); final AtomicLong globalCheckpoint = new 
AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); - try (InternalEngine engine = createEngine(store, createTempDir(), globalCheckpoint::get)) { + try (Store store = createStore(); + InternalEngine engine = + createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get))) { final int numDocs = scaledRandomIntBetween(10, 100); for (int docId = 0; docId < numDocs; docId++) { index(engine, docId); - if (frequently()) { + if (rarely()) { engine.flush(randomBoolean(), randomBoolean()); } } @@ -4485,6 +4490,7 @@ public void testCleanUpCommitsWhenGlobalCheckpointAdvanced() throws Exception { globalCheckpoint.set(randomLongBetween(engine.getLocalCheckpoint(), Long.MAX_VALUE)); engine.syncTranslog(); assertThat(DirectoryReader.listCommits(store.directory()), contains(commits.get(commits.size() - 1))); + assertThat(engine.estimateTranslogOperationsFromMinSeq(0L), equalTo(0)); } } diff --git a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java index 596575abc3025..0888dfd3c40c5 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java @@ -17,12 +17,12 @@ package org.elasticsearch.index.seqno; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; @@ -33,7 +33,6 @@ import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; import java.util.Collections; @@ -47,7 +46,7 @@ public class GlobalCheckpointSyncActionTests extends ESTestCase { private ThreadPool threadPool; - private Transport transport; + private CapturingTransport transport; private ClusterService clusterService; private TransportService transportService; private ShardStateAction shardStateAction; @@ -57,7 +56,7 @@ public void setUp() throws Exception { threadPool = new TestThreadPool(getClass().getName()); transport = new CapturingTransport(); clusterService = createClusterService(threadPool); - transportService = new TransportService(clusterService.getSettings(), transport, threadPool, + transportService = transport.createCapturingTransportService(clusterService.getSettings(), threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, Collections.emptySet()); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java index b2c828cb73f0c..4f4f39c614687 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java @@ -34,11 +34,8 @@ 
import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; -import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.List; @@ -85,23 +82,14 @@ public void testBackgroundGlobalCheckpointSync() throws Exception { (MockTransportService) internalCluster().getInstance(TransportService.class, node.getName()); final MockTransportService receiverTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, other.getName()); - - senderTransportService.addDelegate(receiverTransportService, - new MockTransportService.DelegateTransport(senderTransportService.original()) { - @Override - protected void sendRequest( - final Connection connection, - final long requestId, - final String action, - final TransportRequest request, - final TransportRequestOptions options) throws IOException { - if ("indices:admin/seq_no/global_checkpoint_sync[r]".equals(action)) { - throw new IllegalStateException("blocking indices:admin/seq_no/global_checkpoint_sync[r]"); - } else { - super.sendRequest(connection, requestId, action, request, options); - } - } - }); + senderTransportService.addSendBehavior(receiverTransportService, + (connection, requestId, action, request, options) -> { + if ("indices:admin/seq_no/global_checkpoint_sync[r]".equals(action)) { + throw new IllegalStateException("blocking indices:admin/seq_no/global_checkpoint_sync[r]"); + } else { + connection.sendRequest(requestId, action, request, options); + } + }); } } }, diff --git a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java index 8aaf6de1d6a00..3b5f77d72b6bb 100644 --- a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -70,8 +70,6 @@ import org.elasticsearch.test.MockIndexEventListener; import org.elasticsearch.test.store.MockFSIndexStore; import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; import java.io.IOException; @@ -338,19 +336,15 @@ public void testCorruptionOnNetworkLayerFinalizingRecovery() throws ExecutionExc final CountDownLatch hasCorrupted = new CountDownLatch(1); for (NodeStats dataNode : dataNodeStats) { MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName())); - mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), new MockTransportService.DelegateTransport(mockTransportService.original()) { - - @Override - protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException { - if (corrupt.get() && action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) { - RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request; - byte[] array = BytesRef.deepCopyOf(req.content().toBytesRef()).bytes; - int i = randomIntBetween(0, req.content().length() - 1); - array[i] = 
(byte) ~array[i]; // flip one byte in the content - hasCorrupted.countDown(); - } - super.sendRequest(connection, requestId, action, request, options); + mockTransportService.addSendBehavior(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), (connection, requestId, action, request, options) -> { + if (corrupt.get() && action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) { + RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request; + byte[] array = BytesRef.deepCopyOf(req.content().toBytesRef()).bytes; + int i = randomIntBetween(0, req.content().length() - 1); + array[i] = (byte) ~array[i]; // flip one byte in the content + hasCorrupted.countDown(); } + connection.sendRequest(requestId, action, request, options); }); } @@ -410,25 +404,21 @@ public void testCorruptionOnNetworkLayer() throws ExecutionException, Interrupte final boolean truncate = randomBoolean(); for (NodeStats dataNode : dataNodeStats) { MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName())); - mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), new MockTransportService.DelegateTransport(mockTransportService.original()) { - - @Override - protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException { - if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) { - RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request; - if (truncate && req.length() > 1) { - BytesRef bytesRef = req.content().toBytesRef(); - BytesArray array = new BytesArray(bytesRef.bytes, bytesRef.offset, (int) req.length() - 1); - request = new RecoveryFileChunkRequest(req.recoveryId(), req.shardId(), req.metadata(), req.position(), array, req.lastChunk(), req.totalTranslogOps(), req.sourceThrottleTimeInNanos()); - } else { - assert req.content().toBytesRef().bytes == req.content().toBytesRef().bytes : "no internal reference!!"; - final byte[] array = req.content().toBytesRef().bytes; - int i = randomIntBetween(0, req.content().length() - 1); - array[i] = (byte) ~array[i]; // flip one byte in the content - } + mockTransportService.addSendBehavior(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), (connection, requestId, action, request, options) -> { + if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) { + RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request; + if (truncate && req.length() > 1) { + BytesRef bytesRef = req.content().toBytesRef(); + BytesArray array = new BytesArray(bytesRef.bytes, bytesRef.offset, (int) req.length() - 1); + request = new RecoveryFileChunkRequest(req.recoveryId(), req.shardId(), req.metadata(), req.position(), array, req.lastChunk(), req.totalTranslogOps(), req.sourceThrottleTimeInNanos()); + } else { + assert req.content().toBytesRef().bytes == req.content().toBytesRef().bytes : "no internal reference!!"; + final byte[] array = req.content().toBytesRef().bytes; + int i = randomIntBetween(0, req.content().length() - 1); + array[i] = (byte) ~array[i]; // flip one byte in the content } - super.sendRequest(connection, requestId, action, request, options); } + connection.sendRequest(requestId, action, request, options); }); } diff --git a/server/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java 
b/server/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java index 24825b8a99ac2..25448d92d7e95 100644 --- a/server/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java +++ b/server/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java @@ -38,8 +38,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.ConnectTransportException; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; import java.io.IOException; @@ -95,18 +93,14 @@ public void testRetryDueToExceptionOnNetworkLayer() throws ExecutionException, I for (NodeStats dataNode : nodeStats.getNodes()) { MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName())); - mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), - new MockTransportService.DelegateTransport(mockTransportService.original()) { - @Override - protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, - TransportRequestOptions options) throws IOException { - super.sendRequest(connection, requestId, action, request, options); + mockTransportService.addSendBehavior(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), + (connection, requestId, action, request, options) -> { + connection.sendRequest(requestId, action, request, options); if (action.equals(TransportShardBulkAction.ACTION_NAME) && exceptionThrown.compareAndSet(false, true)) { logger.debug("Throw ConnectTransportException"); throw new ConnectTransportException(connection.getNode(), action); } - } - }); + }); } BulkRequestBuilder bulkBuilder = client.prepareBulk(); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index f72c30d9f4621..81afab4bb8f7e 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -61,6 +61,7 @@ import org.elasticsearch.test.store.MockFSDirectoryService; import org.elasticsearch.test.store.MockFSIndexStore; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.test.transport.StubbableTransport; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportRequest; @@ -604,8 +605,8 @@ public void testDisconnectsWhileRecovering() throws Exception { TransportService blueTransportService = internalCluster().getInstance(TransportService.class, blueNodeName); final CountDownLatch requestBlocked = new CountDownLatch(1); - blueMockTransportService.addDelegate(redTransportService, new RecoveryActionBlocker(dropRequests, recoveryActionToBlock, blueMockTransportService.original(), requestBlocked)); - redMockTransportService.addDelegate(blueTransportService, new RecoveryActionBlocker(dropRequests, recoveryActionToBlock, redMockTransportService.original(), requestBlocked)); + blueMockTransportService.addSendBehavior(redTransportService, new RecoveryActionBlocker(dropRequests, recoveryActionToBlock, requestBlocked)); + 
redMockTransportService.addSendBehavior(blueTransportService, new RecoveryActionBlocker(dropRequests, recoveryActionToBlock, requestBlocked)); logger.info("--> starting recovery from blue to red"); client().admin().indices().prepareUpdateSettings(indexName).setSettings( @@ -626,21 +627,20 @@ public void testDisconnectsWhileRecovering() throws Exception { } - private class RecoveryActionBlocker extends MockTransportService.DelegateTransport { + private class RecoveryActionBlocker implements StubbableTransport.SendRequestBehavior { private final boolean dropRequests; private final String recoveryActionToBlock; private final CountDownLatch requestBlocked; - RecoveryActionBlocker(boolean dropRequests, String recoveryActionToBlock, Transport delegate, CountDownLatch requestBlocked) { - super(delegate); + RecoveryActionBlocker(boolean dropRequests, String recoveryActionToBlock, CountDownLatch requestBlocked) { this.dropRequests = dropRequests; this.recoveryActionToBlock = recoveryActionToBlock; this.requestBlocked = requestBlocked; } @Override - protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, - TransportRequestOptions options) throws IOException { + public void sendRequest(Transport.Connection connection, long requestId, String action, TransportRequest request, + TransportRequestOptions options) throws IOException { if (recoveryActionToBlock.equals(action) || requestBlocked.getCount() == 0) { logger.info("--> preventing {} request", action); requestBlocked.countDown(); @@ -649,7 +649,7 @@ protected void sendRequest(Connection connection, long requestId, String action, } throw new ConnectTransportException(connection.getNode(), "DISCONNECT: prevented " + action + " request"); } - super.sendRequest(connection, requestId, action, request, options); + connection.sendRequest(requestId, action, request, options); } } @@ -692,12 +692,12 @@ public void testDisconnectsDuringRecovery() throws Exception { MockTransportService blueMockTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, blueNodeName); MockTransportService redMockTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, redNodeName); - redMockTransportService.addDelegate(blueMockTransportService, new MockTransportService.DelegateTransport(redMockTransportService.original()) { + redMockTransportService.addSendBehavior(blueMockTransportService, new StubbableTransport.SendRequestBehavior() { private final AtomicInteger count = new AtomicInteger(); @Override - protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, - TransportRequestOptions options) throws IOException { + public void sendRequest(Transport.Connection connection, long requestId, String action, TransportRequest request, + TransportRequestOptions options) throws IOException { logger.info("--> sending request {} on {}", action, connection.getNode()); if (PeerRecoverySourceService.Actions.START_RECOVERY.equals(action) && count.incrementAndGet() == 1) { // ensures that it's considered as valid recovery attempt by source @@ -707,7 +707,7 @@ protected void sendRequest(Connection connection, long requestId, String action, } catch (InterruptedException e) { throw new RuntimeException(e); } - super.sendRequest(connection, requestId, action, request, options); + connection.sendRequest(requestId, action, request, options); try { Thread.sleep(disconnectAfterDelay.millis()); } catch (InterruptedException e) 
{ @@ -715,35 +715,27 @@ protected void sendRequest(Connection connection, long requestId, String action, } throw new ConnectTransportException(connection.getNode(), "DISCONNECT: simulation disconnect after successfully sending " + action + " request"); } else { - super.sendRequest(connection, requestId, action, request, options); + connection.sendRequest(requestId, action, request, options); } } }); final AtomicBoolean finalized = new AtomicBoolean(); - blueMockTransportService.addDelegate(redMockTransportService, new MockTransportService.DelegateTransport(blueMockTransportService.original()) { - @Override - protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, - TransportRequestOptions options) throws IOException { - logger.info("--> sending request {} on {}", action, connection.getNode()); - if (action.equals(PeerRecoveryTargetService.Actions.FINALIZE)) { - finalized.set(true); - } - super.sendRequest(connection, requestId, action, request, options); + blueMockTransportService.addSendBehavior(redMockTransportService, (connection, requestId, action, request, options) -> { + logger.info("--> sending request {} on {}", action, connection.getNode()); + if (action.equals(PeerRecoveryTargetService.Actions.FINALIZE)) { + finalized.set(true); } + connection.sendRequest(requestId, action, request, options); }); for (MockTransportService mockTransportService : Arrays.asList(redMockTransportService, blueMockTransportService)) { - mockTransportService.addDelegate(masterTransportService, new MockTransportService.DelegateTransport(mockTransportService.original()) { - @Override - protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, - TransportRequestOptions options) throws IOException { - logger.info("--> sending request {} on {}", action, connection.getNode()); - if ((primaryRelocation && finalized.get()) == false) { - assertNotEquals(action, ShardStateAction.SHARD_FAILED_ACTION_NAME); - } - super.sendRequest(connection, requestId, action, request, options); + mockTransportService.addSendBehavior(masterTransportService, (connection, requestId, action, request, options) -> { + logger.info("--> sending request {} on {}", action, connection.getNode()); + if ((primaryRelocation && finalized.get()) == false) { + assertNotEquals(action, ShardStateAction.SHARD_FAILED_ACTION_NAME); } + connection.sendRequest(requestId, action, request, options); }); } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java index f3e0c58d12432..5a267e1f34895 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -74,7 +74,6 @@ public void testTranslogHistoryTransferred() throws Exception { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32089") public void testRetentionPolicyChangeDuringRecovery() throws Exception { try (ReplicationGroup shards = createGroup(0)) { shards.startPrimary(); diff --git a/server/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java b/server/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java index 5c1c0d91e608b..7e99ccbbe6117 100644 --- a/server/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java +++ 
b/server/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java @@ -55,8 +55,6 @@ import org.elasticsearch.test.disruption.BlockClusterStateProcessing; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.ConnectTransportException; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; import java.io.IOException; @@ -234,16 +232,13 @@ public void testShardCleanupIfShardDeletionAfterRelocationFailedAndIndexDeleted( MockTransportService transportServiceNode_1 = (MockTransportService) internalCluster().getInstance(TransportService.class, node_1); TransportService transportServiceNode_2 = internalCluster().getInstance(TransportService.class, node_2); final CountDownLatch shardActiveRequestSent = new CountDownLatch(1); - transportServiceNode_1.addDelegate(transportServiceNode_2, new MockTransportService.DelegateTransport(transportServiceNode_1.original()) { - @Override - protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException { - if (action.equals("internal:index/shard/exists") && shardActiveRequestSent.getCount() > 0) { - shardActiveRequestSent.countDown(); - logger.info("prevent shard active request from being sent"); - throw new ConnectTransportException(connection.getNode(), "DISCONNECT: simulated"); - } - super.sendRequest(connection, requestId, action, request, options); + transportServiceNode_1.addSendBehavior(transportServiceNode_2, (connection, requestId, action, request, options) -> { + if (action.equals("internal:index/shard/exists") && shardActiveRequestSent.getCount() > 0) { + shardActiveRequestSent.countDown(); + logger.info("prevent shard active request from being sent"); + throw new ConnectTransportException(connection.getNode(), "DISCONNECT: simulated"); } + connection.sendRequest(requestId, action, request, options); }); logger.info("--> move shard from {} to {}, and wait for relocation to finish", node_1, node_2); diff --git a/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java b/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java index 738cd00a43d2e..cb93d803bb7c6 100644 --- a/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java +++ b/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java @@ -58,6 +58,7 @@ import org.elasticsearch.test.MockIndexEventListener; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.test.transport.StubbableTransport; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; @@ -372,7 +373,7 @@ public void testCancellationCleansTempFiles() throws Exception { MockTransportService mockTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, p_node); for (DiscoveryNode node : clusterService.state().nodes()) { if (!node.equals(clusterService.localNode())) { - mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, node.getName()), new RecoveryCorruption(mockTransportService.original(), corruptionCount)); + mockTransportService.addSendBehavior(internalCluster().getInstance(TransportService.class, node.getName()), new RecoveryCorruption(corruptionCount)); } } @@ 
-485,17 +486,16 @@ public void testIndexAndRelocateConcurrently() throws ExecutionException, Interr } - class RecoveryCorruption extends MockTransportService.DelegateTransport { + class RecoveryCorruption implements StubbableTransport.SendRequestBehavior { private final CountDownLatch corruptionCount; - RecoveryCorruption(Transport transport, CountDownLatch corruptionCount) { - super(transport); + RecoveryCorruption(CountDownLatch corruptionCount) { this.corruptionCount = corruptionCount; } @Override - protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException { + public void sendRequest(Transport.Connection connection, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException { if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) { RecoveryFileChunkRequest chunkRequest = (RecoveryFileChunkRequest) request; if (chunkRequest.name().startsWith(IndexFileNames.SEGMENTS)) { @@ -506,9 +506,9 @@ protected void sendRequest(Connection connection, long requestId, String action, array[0] = (byte) ~array[0]; // flip one byte in the content corruptionCount.countDown(); } - super.sendRequest(connection, requestId, action, request, options); + connection.sendRequest(requestId, action, request, options); } else { - super.sendRequest(connection, requestId, action, request, options); + connection.sendRequest(requestId, action, request, options); } } } diff --git a/server/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java b/server/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java index b33cfb508b930..ac8688c9847d3 100644 --- a/server/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java +++ b/server/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java @@ -35,11 +35,8 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; -import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -70,8 +67,8 @@ protected Collection> nodePlugins() { */ public void testCancelRecoveryAndResume() throws Exception { assertTrue(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() - .put(CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES))) - .get().isAcknowledged()); + .put(CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES))) + .get().isAcknowledged()); NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get(); List dataNodeStats = new ArrayList<>(); @@ -91,9 +88,9 @@ public void testCancelRecoveryAndResume() throws Exception { // create the index and prevent allocation on any other nodes than the lucky one // we have no replicas so far and make sure that we allocate the primary on the lucky node assertAcked(prepareCreate("test") - .addMapping("type1", "field1", "type=text", "the_id", "type=text") - .setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numberOfShards()) - .put("index.routing.allocation.include._name", primariesNode.getNode().getName()))); // only allocate on the lucky 
node + .addMapping("type1", "field1", "type=text", "the_id", "type=text") + .setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numberOfShards()) + .put("index.routing.allocation.include._name", primariesNode.getNode().getName()))); // only allocate on the lucky node // index some docs and check if they are coming back int numDocs = randomIntBetween(100, 200); @@ -116,11 +113,8 @@ public void testCancelRecoveryAndResume() throws Exception { final AtomicBoolean truncate = new AtomicBoolean(true); for (NodeStats dataNode : dataNodeStats) { MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName())); - mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), new MockTransportService.DelegateTransport(mockTransportService.original()) { - - @Override - protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, - TransportRequestOptions options) throws IOException { + mockTransportService.addSendBehavior(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), + (connection, requestId, action, request, options) -> { if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) { RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request; logger.debug("file chunk [{}] lastChunk: {}", req, req.lastChunk()); @@ -129,16 +123,15 @@ protected void sendRequest(Connection connection, long requestId, String action, throw new RuntimeException("Caused some truncated files for fun and profit"); } } - super.sendRequest(connection, requestId, action, request, options); - } - }); + connection.sendRequest(requestId, action, request, options); + }); } logger.info("--> bumping replicas to 1"); // client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - .put("index.routing.allocation.include._name", // now allow allocation on all nodes - primariesNode.getNode().getName() + "," + unluckyNode.getNode().getName())).get(); + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put("index.routing.allocation.include._name", // now allow allocation on all nodes + primariesNode.getNode().getName() + "," + unluckyNode.getNode().getName())).get(); latch.await(); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java index a14fca63154d7..b7c5bf03ac552 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations.bucket.histogram; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.rounding.DateTimeUnit; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation; @@ -28,7 +29,11 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalMultiBucketAggregationTestCase; import 
org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import java.time.Instant;
+import java.time.OffsetDateTime;
+import java.time.ZoneOffset;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -39,6 +44,8 @@
 import static org.elasticsearch.common.unit.TimeValue.timeValueHours;
 import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;
 import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
+import static org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder.createRounding;
+import static org.hamcrest.Matchers.equalTo;
 public class InternalAutoDateHistogramTests extends InternalMultiBucketAggregationTestCase<InternalAutoDateHistogram> {
@@ -56,11 +63,12 @@ protected InternalAutoDateHistogram createTestInstance(String name,
                                                            List<PipelineAggregator> pipelineAggregators,
                                                            Map<String, Object> metaData,
                                                            InternalAggregations aggregations) {
-
+        roundingInfos = AutoDateHistogramAggregationBuilder.buildRoundings(null);
         int nbBuckets = randomNumberOfBuckets();
         int targetBuckets = randomIntBetween(1, nbBuckets * 2 + 1);
         List<InternalAutoDateHistogram.Bucket> buckets = new ArrayList<>(nbBuckets);
+
         long startingDate = System.currentTimeMillis();
         long interval = randomIntBetween(1, 3);
@@ -72,23 +80,41 @@ protected InternalAutoDateHistogram createTestInstance(String name,
         }
         InternalAggregations subAggregations = new InternalAggregations(Collections.emptyList());
         BucketInfo bucketInfo = new BucketInfo(roundingInfos, randomIntBetween(0, roundingInfos.length - 1), subAggregations);
+        return new InternalAutoDateHistogram(name, buckets, targetBuckets, bucketInfo, format, pipelineAggregators, metaData);
+    }
+    /*
+    This test was added to reproduce a bug where getAppropriateRounding was only ever using the first innerIntervals
+    passed in, instead of using the interval associated with the loop.
+    */
+    public void testGetAppropriateRoundingUsesCorrectIntervals() {
+        RoundingInfo[] roundings = new RoundingInfo[6];
+        DateTimeZone timeZone = DateTimeZone.UTC;
+        // Since we pass 0 as the starting index to getAppropriateRounding, we'll also use
+        // an innerInterval that is quite large, such that targetBuckets * roundings[i].getMaximumInnerInterval()
+        // will be larger than the estimate.
+        roundings[0] = new RoundingInfo(createRounding(DateTimeUnit.SECOND_OF_MINUTE, timeZone),
+            1000L, 1000);
+        roundings[1] = new RoundingInfo(createRounding(DateTimeUnit.MINUTES_OF_HOUR, timeZone),
+            60 * 1000L, 1, 5, 10, 30);
+        roundings[2] = new RoundingInfo(createRounding(DateTimeUnit.HOUR_OF_DAY, timeZone),
+            60 * 60 * 1000L, 1, 3, 12);
-        return new InternalAutoDateHistogram(name, buckets, targetBuckets, bucketInfo, format, pipelineAggregators, metaData);
+        OffsetDateTime timestamp = Instant.parse("2018-01-01T00:00:01.000Z").atOffset(ZoneOffset.UTC);
+        // We want to pass a roundingIdx of zero, because in order to reproduce this bug, we need the function
+        // to increment the rounding (because the bug was that the function would not use the innerIntervals
+        // from the new rounding).
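+        // For illustration (derived from the roundings above, not part of the original test):
+        // one day at the second rounding is 86,400 one-second buckets, and even its largest
+        // inner interval (1000) leaves ~86 buckets, more than the 25 targetBuckets; the minute
+        // rounding's largest inner interval (30) still leaves 1440 / 30 = 48 buckets, so the
+        // hour rounding at index 2 is the first that fits (24 one-hour buckets <= 25).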
+        int result = InternalAutoDateHistogram.getAppropriateRounding(timestamp.toEpochSecond()*1000,
+            timestamp.plusDays(1).toEpochSecond()*1000, 0, roundings, 25);
+        assertThat(result, equalTo(2));
     }
     @Override
     protected void assertReduced(InternalAutoDateHistogram reduced, List<InternalAutoDateHistogram> inputs) {
-        int roundingIdx = 0;
-        for (InternalAutoDateHistogram histogram : inputs) {
-            if (histogram.getBucketInfo().roundingIdx > roundingIdx) {
-                roundingIdx = histogram.getBucketInfo().roundingIdx;
-            }
-        }
-        RoundingInfo roundingInfo = roundingInfos[roundingIdx];
         long lowest = Long.MAX_VALUE;
         long highest = 0;
+
         for (InternalAutoDateHistogram histogram : inputs) {
             for (Histogram.Bucket bucket : histogram.getBuckets()) {
                 long bucketKey = ((DateTime) bucket.getKey()).getMillis();
@@ -100,35 +126,72 @@ protected void assertReduced(InternalAutoDateHistogram reduced, List<InternalAu
+        RoundingInfo roundingInfo = roundingInfos[reduced.getBucketInfo().roundingIdx];
+        long normalizedDuration = (highest - lowest) / roundingInfo.getRoughEstimateDurationMillis();
+        long innerIntervalToUse = roundingInfo.innerIntervals[0];
+        int innerIntervalIndex = 0;
+        for (int j = roundingInfo.innerIntervals.length - 1; j >= 0; j--) {
+            int interval = roundingInfo.innerIntervals[j];
+            if (normalizedDuration / interval < reduced.getBuckets().size()) {
+                innerIntervalToUse = interval;
+                innerIntervalIndex = j;
+            }
+        }
+
+        long intervalInMillis = innerIntervalToUse * roundingInfo.getRoughEstimateDurationMillis();
+        int bucketCount = getBucketCount(lowest, highest, roundingInfo, intervalInMillis);
+
+        // Next, if our bucketCount is still above what we need, we'll go back and determine the interval
+        // based on a size calculation.
+        if (bucketCount > reduced.getBuckets().size()) {
+            for (int i = innerIntervalIndex; i < roundingInfo.innerIntervals.length; i++) {
+                long newIntervalMillis = roundingInfo.innerIntervals[i] * roundingInfo.getRoughEstimateDurationMillis();
+                if (getBucketCount(lowest, highest, roundingInfo, newIntervalMillis) <= reduced.getBuckets().size()) {
+                    innerIntervalToUse = roundingInfo.innerIntervals[i];
+                    intervalInMillis = innerIntervalToUse * roundingInfo.getRoughEstimateDurationMillis();
+                }
+            }
+        }
+
         Map<Long, Long> expectedCounts = new TreeMap<>();
-        long intervalInMillis = innerIntervalToUse*roundingInfo.getRoughEstimateDurationMillis();
         for (long keyForBucket = roundingInfo.rounding.round(lowest);
-             keyForBucket <= highest;
+             keyForBucket <= roundingInfo.rounding.round(highest);
              keyForBucket = keyForBucket + intervalInMillis) {
             expectedCounts.put(keyForBucket, 0L);
+
+            // Iterate through the input buckets, and for each bucket, determine if it's inside
+            // the range of the bucket in the outer loop. If it is, add the doc count to the total
+            // for that bucket.
+
             for (InternalAutoDateHistogram histogram : inputs) {
                 for (Histogram.Bucket bucket : histogram.getBuckets()) {
-                    long bucketKey = ((DateTime) bucket.getKey()).getMillis();
-                    long roundedBucketKey = roundingInfo.rounding.round(bucketKey);
+                    long roundedBucketKey = roundingInfo.rounding.round(((DateTime) bucket.getKey()).getMillis());
+                    long docCount = bucket.getDocCount();
                     if (roundedBucketKey >= keyForBucket && roundedBucketKey < keyForBucket + intervalInMillis) {
-                        long count = bucket.getDocCount();
                         expectedCounts.compute(keyForBucket,
-                            (key, oldValue) -> (oldValue == null ? 0 : oldValue) + count);
+                            (key, oldValue) -> (oldValue == null ? 0 : oldValue) + docCount);
                     }
                 }
             }
         }
+        // If there is only a single bucket, and we haven't added it above, add a bucket with no documents.
+        // This step is necessary because of the roundedBucketKey < keyForBucket + intervalInMillis above.
+        if (roundingInfo.rounding.round(lowest) == roundingInfo.rounding.round(highest) && expectedCounts.isEmpty()) {
+            expectedCounts.put(roundingInfo.rounding.round(lowest), 0L);
+        }
+
+        // pick out the actual reduced values to make the assertion more readable
         Map<Long, Long> actualCounts = new TreeMap<>();
         for (Histogram.Bucket bucket : reduced.getBuckets()) {
             actualCounts.compute(((DateTime) bucket.getKey()).getMillis(),
@@ -137,6 +200,16 @@ protected void assertReduced(InternalAutoDateHistogram reduced, List<InternalAu
         assertEquals(expectedCounts, actualCounts);
     }
+
+    private int getBucketCount(long lowest, long highest, RoundingInfo roundingInfo, long intervalInMillis) {
+        int bucketCount = 0;
+        for (long keyForBucket = roundingInfo.rounding.round(lowest);
+             keyForBucket <= roundingInfo.rounding.round(highest);
+             keyForBucket = keyForBucket + intervalInMillis) {
+            bucketCount++;
+        }
+        return bucketCount;
+    }
+
     @Override
     protected Writeable.Reader<InternalAutoDateHistogram> instanceReader() {
         return InternalAutoDateHistogram::new;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java
index 13e1489795996..f62598fa7c317 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java
@@ -67,6 +67,7 @@
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.hasKey;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
 import static org.hamcrest.Matchers.notNullValue;
@@ -90,42 +91,63 @@ public static class CustomScriptPlugin extends MockScriptPlugin {
     protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() {
         Map<String, Function<Map<String, Object>, Object>> scripts = new HashMap<>();
-        scripts.put("_agg['count'] = 1", vars ->
-            aggScript(vars, agg -> ((Map) agg).put("count", 1)));
+        scripts.put("state['count'] = 1", vars ->
+            aggScript(vars, state -> state.put("count", 1)));
-        scripts.put("_agg.add(1)", vars ->
-            aggScript(vars, agg -> ((List) agg).add(1)));
+        scripts.put("state.list.add(1)", vars ->
+            aggScript(vars, state -> {
+                // Lazily populate state.list for tests without an init script
+                if (state.containsKey("list") == false) {
+                    state.put("list", new ArrayList());
+                }
+
+                ((List) state.get("list")).add(1);
+            }));
-        scripts.put("_agg[param1] = param2", vars ->
-            aggScript(vars, agg -> ((Map) agg).put(XContentMapValues.extractValue("params.param1", vars),
+        scripts.put("state[param1] = param2", vars ->
+            aggScript(vars, state -> state.put((String) XContentMapValues.extractValue("params.param1", vars),
                 XContentMapValues.extractValue("params.param2", vars))));
-        scripts.put("vars.multiplier = 3", vars ->
-            ((Map) vars.get("vars")).put("multiplier", 3));
+        scripts.put("vars.multiplier = 3", vars -> {
+            ((Map) vars.get("vars")).put("multiplier", 3);
+
+            Map state = (Map) vars.get("state");
+            state.put("list", new ArrayList());
-        scripts.put("_agg.add(vars.multiplier)", vars ->
-            aggScript(vars, agg -> ((List) agg).add(XContentMapValues.extractValue("vars.multiplier", vars))));
+            return state;
+        });
+
+        scripts.put("state.list.add(vars.multiplier)", vars ->
+            aggScript(vars, state -> {
+                // Lazily populate state.list for tests without an init script
+                if (state.containsKey("list") == false) {
+                    state.put("list", new ArrayList());
+                }
+
+                ((List) state.get("list")).add(XContentMapValues.extractValue("vars.multiplier", vars));
+            }));
         // Equivalent to:
         //
         // newaggregation = [];
         // sum = 0;
         //
-        // for (a in _agg) {
-        //     sum += a
+        // for (s in state.list) {
+        //     sum += s
         // };
         //
         // newaggregation.add(sum);
         // return newaggregation"
         //
-        scripts.put("sum agg values as a new aggregation", vars -> {
+        scripts.put("sum state values as a new aggregation", vars -> {
             List
newAggregation = new ArrayList(); - List agg = (List) vars.get("_agg"); + Map state = (Map) vars.get("state"); + List list = (List) state.get("list"); - if (agg != null) { + if (list != null) { Integer sum = 0; - for (Object a : (List) agg) { - sum += ((Number) a).intValue(); + for (Object s : list) { + sum += ((Number) s).intValue(); } newAggregation.add(sum); } @@ -137,24 +159,41 @@ protected Map, Object>> pluginScripts() { // newaggregation = []; // sum = 0; // - // for (aggregation in _aggs) { - // for (a in aggregation) { - // sum += a + // for (state in states) { + // for (s in state) { + // sum += s // } // }; // // newaggregation.add(sum); // return newaggregation" // - scripts.put("sum aggs of agg values as a new aggregation", vars -> { + scripts.put("sum all states (lists) values as a new aggregation", vars -> { + List newAggregation = new ArrayList(); + Integer sum = 0; + + List> states = (List>) vars.get("states"); + for (List list : states) { + if (list != null) { + for (Object s : list) { + sum += ((Number) s).intValue(); + } + } + } + newAggregation.add(sum); + return newAggregation; + }); + + scripts.put("sum all states' state.list values as a new aggregation", vars -> { List newAggregation = new ArrayList(); Integer sum = 0; - List aggs = (List) vars.get("_aggs"); - for (Object aggregation : (List) aggs) { - if (aggregation != null) { - for (Object a : (List) aggregation) { - sum += ((Number) a).intValue(); + List> states = (List>) vars.get("states"); + for (Map state : states) { + List list = (List) state.get("list"); + if (list != null) { + for (Object s : list) { + sum += ((Number) s).intValue(); } } } @@ -167,25 +206,25 @@ protected Map, Object>> pluginScripts() { // newaggregation = []; // sum = 0; // - // for (aggregation in _aggs) { - // for (a in aggregation) { - // sum += a + // for (state in states) { + // for (s in state) { + // sum += s // } // }; // // newaggregation.add(sum * multiplier); // return newaggregation" // - scripts.put("multiplied sum aggs of agg values as a new aggregation", vars -> { + scripts.put("multiplied sum all states (lists) values as a new aggregation", vars -> { Integer multiplier = (Integer) vars.get("multiplier"); List newAggregation = new ArrayList(); Integer sum = 0; - List aggs = (List) vars.get("_aggs"); - for (Object aggregation : (List) aggs) { - if (aggregation != null) { - for (Object a : (List) aggregation) { - sum += ((Number) a).intValue(); + List> states = (List>) vars.get("states"); + for (List list : states) { + if (list != null) { + for (Object s : list) { + sum += ((Number) s).intValue(); } } } @@ -193,53 +232,12 @@ protected Map, Object>> pluginScripts() { return newAggregation; }); - scripts.put("state.items = new ArrayList()", vars -> - aggContextScript(vars, state -> ((HashMap) state).put("items", new ArrayList()))); - - scripts.put("state.items.add(1)", vars -> - aggContextScript(vars, state -> { - HashMap stateMap = (HashMap) state; - List items = (List) stateMap.get("items"); - items.add(1); - })); - - scripts.put("sum context state values", vars -> { - int sum = 0; - HashMap state = (HashMap) vars.get("state"); - List items = (List) state.get("items"); - - for (Object x : items) { - sum += (Integer)x; - } - - return sum; - }); - - scripts.put("sum context states", vars -> { - Integer sum = 0; - - List states = (List) vars.get("states"); - for (Object state : states) { - sum += ((Number) state).intValue(); - } - - return sum; - }); - return scripts; } - static Object aggScript(Map vars, Consumer fn) { - return 
- static <T> Object aggScript(Map<String, Object> vars, Consumer<T> fn) { - return aggScript(vars, fn, "_agg"); - } - - static <T> Object aggContextScript(Map<String, Object> vars, Consumer<T> fn) { - return aggScript(vars, fn, "state"); - } - @SuppressWarnings("unchecked") - private static <T> Object aggScript(Map<String, Object> vars, Consumer<T> fn, String stateVarName) { - T aggState = (T) vars.get(stateVarName); + static Map<String, Object> aggScript(Map<String, Object> vars, Consumer<Map<String, Object>> fn) { + Map<String, Object> aggState = (Map<String, Object>) vars.get("state"); fn.accept(aggState); return aggState; } @@ -285,17 +283,17 @@ public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().cluster().preparePutStoredScript() .setId("mapScript_stored") .setContent(new BytesArray("{\"script\": {\"lang\": \"" + MockScriptPlugin.NAME + "\"," + - " \"source\": \"_agg.add(vars.multiplier)\"} }"), XContentType.JSON)); + " \"source\": \"state.list.add(vars.multiplier)\"} }"), XContentType.JSON)); assertAcked(client().admin().cluster().preparePutStoredScript() .setId("combineScript_stored") .setContent(new BytesArray("{\"script\": {\"lang\": \"" + MockScriptPlugin.NAME + "\"," + - " \"source\": \"sum agg values as a new aggregation\"} }"), XContentType.JSON)); + " \"source\": \"sum state values as a new aggregation\"} }"), XContentType.JSON)); assertAcked(client().admin().cluster().preparePutStoredScript() .setId("reduceScript_stored") .setContent(new BytesArray("{\"script\": {\"lang\": \"" + MockScriptPlugin.NAME + "\"," + - " \"source\": \"sum aggs of agg values as a new aggregation\"} }"), XContentType.JSON)); + " \"source\": \"sum all states (lists) values as a new aggregation\"} }"), XContentType.JSON)); indexRandom(true, builders); ensureSearchable(); @@ -315,9 +313,10 @@ public void setUp() throws Exception { // the name of the file script is used in the test method while the source of the file script // must match a predefined script from CustomScriptPlugin.pluginScripts() method Files.write(scripts.resolve("init_script.mockscript"), "vars.multiplier = 3".getBytes("UTF-8")); - Files.write(scripts.resolve("map_script.mockscript"), "_agg.add(vars.multiplier)".getBytes("UTF-8")); - Files.write(scripts.resolve("combine_script.mockscript"), "sum agg values as a new aggregation".getBytes("UTF-8")); - Files.write(scripts.resolve("reduce_script.mockscript"), "sum aggs of agg values as a new aggregation".getBytes("UTF-8")); + Files.write(scripts.resolve("map_script.mockscript"), "state.list.add(vars.multiplier)".getBytes("UTF-8")); + Files.write(scripts.resolve("combine_script.mockscript"), "sum state values as a new aggregation".getBytes("UTF-8")); + Files.write(scripts.resolve("reduce_script.mockscript"), + "sum all states (lists) values as a new aggregation".getBytes("UTF-8")); } catch (IOException e) { throw new RuntimeException("failed to create scripts"); } @@ -329,7 +328,7 @@ protected Path nodeConfigPath(int nodeOrdinal) { } public void testMap() { - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg['count'] = 1", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state['count'] = 1", Collections.emptyMap()); SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) @@ -365,52 +364,12 @@ public void testMap() { assertThat(numShardsRun, greaterThan(0)); } - public void testExplicitAggParam() { - Map<String, Object> params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); - - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(1)", Collections.emptyMap()); - - SearchResponse response = client().prepareSearch("idx") -
.setQuery(matchAllQuery()) - .addAggregation(scriptedMetric("scripted").params(params).mapScript(mapScript)) - .get(); - assertSearchResponse(response); - assertThat(response.getHits().getTotalHits(), equalTo(numDocs)); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class)); - List aggregationList = (List) scriptedMetricAggregation.aggregation(); - assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries)); - long totalCount = 0; - for (Object object : aggregationList) { - assertThat(object, notNullValue()); - assertThat(object, instanceOf(List.class)); - List list = (List) object; - for (Object o : list) { - assertThat(o, notNullValue()); - assertThat(o, instanceOf(Number.class)); - Number numberValue = (Number) o; - assertThat(numberValue, equalTo((Number) 1)); - totalCount += numberValue.longValue(); - } - } - assertThat(totalCount, equalTo(numDocs)); - } - - public void testMapWithParamsAndImplicitAggMap() { + public void testMapWithParams() { // Split the params up between the script and the aggregation. - // Don't put any _agg map in params. Map scriptParams = Collections.singletonMap("param1", "12"); Map aggregationParams = Collections.singletonMap("param2", 1); - // The _agg hashmap will be available even if not declared in the params map - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg[param1] = param2", scriptParams); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state[param1] = param2", scriptParams); SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) @@ -454,7 +413,6 @@ public void testInitMapWithParams() { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); SearchResponse response = client() @@ -466,7 +424,7 @@ public void testInitMapWithParams() { .initScript( new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap())) .mapScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "_agg.add(vars.multiplier)", Collections.emptyMap()))) + "state.list.add(vars.multiplier)", Collections.emptyMap()))) .get(); assertSearchResponse(response); assertThat(response.getHits().getTotalHits(), equalTo(numDocs)); @@ -483,8 +441,11 @@ public void testInitMapWithParams() { long totalCount = 0; for (Object object : aggregationList) { assertThat(object, notNullValue()); - assertThat(object, instanceOf(List.class)); - List list = (List) object; + assertThat(object, instanceOf(HashMap.class)); + Map map = (Map) object; + assertThat(map, hasKey("list")); + assertThat(map.get("list"), instanceOf(List.class)); + List list = (List) map.get("list"); for (Object o : list) { assertThat(o, notNullValue()); assertThat(o, instanceOf(Number.class)); @@ -501,12 +462,11 @@ public void testMapCombineWithParams() { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(1)", Collections.emptyMap()); + 
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(1)", Collections.emptyMap()); Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -553,13 +513,13 @@ public void testInitMapCombineWithParams() { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", + Collections.emptyMap()); Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -607,15 +567,15 @@ public void testInitMapCombineReduceWithParams() { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", + Collections.emptyMap()); Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); - Script reduceScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "sum all states (lists) values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -652,15 +612,15 @@ public void testInitMapCombineReduceGetProperty() throws Exception { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", + Collections.emptyMap()); Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); - Script reduceScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, 
"sum state values as a new aggregation", Collections.emptyMap()); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "sum all states (lists) values as a new aggregation", Collections.emptyMap()); SearchResponse searchResponse = client() .prepareSearch("idx") @@ -707,14 +667,14 @@ public void testMapCombineReduceWithParams() { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", + Collections.emptyMap()); Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); - Script reduceScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "sum all states (lists) values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -749,13 +709,13 @@ public void testInitMapReduceWithParams() { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); - Script reduceScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", + Collections.emptyMap()); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "sum all states' state.list values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -789,12 +749,12 @@ public void testMapReduceWithParams() { Map varsMap = new HashMap<>(); varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); - Script reduceScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", + Collections.emptyMap()); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "sum all states' state.list values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -828,18 +788,18 @@ public void testInitMapCombineReduceWithParamsAndReduceParams() { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); Map reduceParams = new HashMap<>(); reduceParams.put("multiplier", 4); Script initScript = new 
Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", + Collections.emptyMap()); Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); - Script reduceScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "multiplied sum aggs of agg values as a new aggregation", reduceParams); + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "multiplied sum all states (lists) values as a new aggregation", reduceParams); SearchResponse response = client() .prepareSearch("idx") @@ -875,7 +835,6 @@ public void testInitMapCombineReduceWithParamsStored() { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); SearchResponse response = client() @@ -916,15 +875,15 @@ public void testInitMapCombineReduceWithParamsAsSubAgg() { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", + Collections.emptyMap()); Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); - Script reduceScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "sum all states (lists) values as a new aggregation", Collections.emptyMap()); SearchResponse response = client() .prepareSearch("idx") @@ -977,15 +936,15 @@ public void testEmptyAggregation() throws Exception { varsMap.put("multiplier", 1); Map params = new HashMap<>(); - params.put("_agg", new ArrayList<>()); params.put("vars", varsMap); Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(vars.multiplier)", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", + Collections.emptyMap()); Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum agg values as a new aggregation", Collections.emptyMap()); - Script reduceScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum aggs of agg values as a new aggregation", Collections.emptyMap()); + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); + Script reduceScript = new 
Script(ScriptType.INLINE, CustomScriptPlugin.NAME, + "sum all states (lists) values as a new aggregation", Collections.emptyMap()); SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") .setQuery(matchAllQuery()) @@ -1021,7 +980,7 @@ public void testEmptyAggregation() throws Exception { * not using a script does get cached. */ public void testDontCacheScripts() throws Exception { - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg['count'] = 1", Collections.emptyMap()); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state['count'] = 1", Collections.emptyMap()); assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=long") .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) .get()); @@ -1047,7 +1006,7 @@ public void testDontCacheScripts() throws Exception { public void testConflictingAggAndScriptParams() { Map params = Collections.singletonMap("param1", "12"); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(1)", params); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(1)", params); SearchRequestBuilder builder = client().prepareSearch("idx") .setQuery(matchAllQuery()) @@ -1056,37 +1015,4 @@ public void testConflictingAggAndScriptParams() { SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, builder::get); assertThat(ex.getCause().getMessage(), containsString("Parameter name \"param1\" used in both aggregation and script parameters")); } - - public void testAggFromContext() { - Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.items = new ArrayList()", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.items.add(1)", Collections.emptyMap()); - Script combineScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum context state values", Collections.emptyMap()); - Script reduceScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum context states", - Collections.emptyMap()); - - SearchResponse response = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted") - .initScript(initScript) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript)) - .get(); - - Aggregation aggregation = response.getAggregations().get("scripted"); - assertThat(aggregation, notNullValue()); - assertThat(aggregation, instanceOf(ScriptedMetric.class)); - - ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation; - assertThat(scriptedMetricAggregation.getName(), equalTo("scripted")); - assertThat(scriptedMetricAggregation.aggregation(), notNullValue()); - - assertThat(scriptedMetricAggregation.aggregation(), instanceOf(Integer.class)); - Integer aggResult = (Integer) scriptedMetricAggregation.aggregation(); - long totalAgg = aggResult.longValue(); - assertThat(totalAgg, equalTo(numDocs)); - } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricAggStateV6CompatTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricAggStateV6CompatTests.java new file mode 100644 index 0000000000000..4abf68a960b11 --- /dev/null +++ 
b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricAggStateV6CompatTests.java @@ -0,0 +1,109 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.metrics.scripted; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.script.MockScriptEngine; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptedMetricAggContexts; +import org.elasticsearch.script.ScriptEngine; +import org.elasticsearch.script.ScriptModule; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.aggregations.Aggregation.CommonFields; +import org.elasticsearch.search.aggregations.ParsedAggregation; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.test.InternalAggregationTestCase; + +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.function.Predicate; + +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.sameInstance; + +/** + * This test verifies that the _aggs param is added correctly when the system property + * "es.aggregations.enable_scripted_metric_agg_param" is set to true. + */ +public class InternalScriptedMetricAggStateV6CompatTests extends InternalAggregationTestCase<InternalScriptedMetric> { + + private static final String REDUCE_SCRIPT_NAME = "reduceScript"; + + @Override + protected InternalScriptedMetric createTestInstance(String name, List<PipelineAggregator> pipelineAggregators, + Map<String, Object> metaData) { + Script reduceScript = new Script(ScriptType.INLINE, MockScriptEngine.NAME, REDUCE_SCRIPT_NAME, Collections.emptyMap()); + return new InternalScriptedMetric(name, "agg value", reduceScript, pipelineAggregators, metaData); + } + + /** + * Mock of the script service. The script that is run looks at the + * "_aggs" parameter to verify that it was put in place by InternalScriptedMetric.
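+ * <p>Put differently (a sketch of the asserted contract, assuming the legacy shim is
+ * active): inside the reduce script {@code params.get("_aggs")} and
+ * {@code params.get("states")} resolve to the same list instance, so legacy reduce
+ * scripts keep working while the deprecation warning is emitted.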
+ */ + @Override + protected ScriptService mockScriptService() { + Function<Map<String, Object>, Object> script = params -> { + Object aggs = params.get("_aggs"); + Object states = params.get("states"); + assertThat(aggs, instanceOf(List.class)); + assertThat(aggs, sameInstance(states)); + return aggs; + }; + + @SuppressWarnings("unchecked") + MockScriptEngine scriptEngine = new MockScriptEngine(MockScriptEngine.NAME, + Collections.singletonMap(REDUCE_SCRIPT_NAME, script)); + Map<String, ScriptEngine> engines = Collections.singletonMap(scriptEngine.getType(), scriptEngine); + return new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS); + } + + @Override + protected void assertReduced(InternalScriptedMetric reduced, List<InternalScriptedMetric> inputs) { + assertWarnings(ScriptedMetricAggContexts.AGG_PARAM_DEPRECATION_WARNING); + } + + @Override + protected Reader<InternalScriptedMetric> instanceReader() { + return InternalScriptedMetric::new; + } + + @Override + protected void assertFromXContent(InternalScriptedMetric aggregation, ParsedAggregation parsedAggregation) {} + + @Override + protected Predicate<String> excludePathsFromXContentInsertion() { + return path -> path.contains(CommonFields.VALUE.getPreferredName()); + } + + @Override + protected InternalScriptedMetric mutateInstance(InternalScriptedMetric instance) { + String name = instance.getName(); + Object value = instance.aggregation(); + Script reduceScript = instance.reduceScript; + List<PipelineAggregator> pipelineAggregators = instance.pipelineAggregators(); + Map<String, Object> metaData = instance.getMetaData(); + return new InternalScriptedMetric(name + randomAlphaOfLength(5), value, reduceScript, pipelineAggregators, + metaData); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricTests.java index 584208af4177c..70ddacf5698b2 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetricTests.java @@ -107,7 +107,7 @@ private static Object randomValue(Supplier<Object>[] valueTypes, int level) { /** * Mock of the script service. The script that is run looks at the - * "_aggs" parameter visible when executing the script and simply returns the count. + * "states" context variable visible when executing the script and simply returns the count. * This should be equal to the number of input InternalScriptedMetrics that are reduced * in total.
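+ * <p>For example, reducing three shard-level InternalScriptedMetric instances through
+ * this mock yields {@code 3}.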
*/ @@ -116,7 +116,7 @@ protected ScriptService mockScriptService() { // mock script always returns the size of the input aggs list as result @SuppressWarnings("unchecked") MockScriptEngine scriptEngine = new MockScriptEngine(MockScriptEngine.NAME, - Collections.singletonMap(REDUCE_SCRIPT_NAME, script -> ((List) script.get("_aggs")).size())); + Collections.singletonMap(REDUCE_SCRIPT_NAME, script -> ((List) script.get("states")).size())); Map<String, ScriptEngine> engines = Collections.singletonMap(scriptEngine.getType(), scriptEngine); return new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorAggStateV6CompatTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorAggStateV6CompatTests.java new file mode 100644 index 0000000000000..bf78cae711b9d --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorAggStateV6CompatTests.java @@ -0,0 +1,180 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.metrics.scripted; + +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.store.Directory; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.script.MockScriptEngine; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptedMetricAggContexts; +import org.elasticsearch.script.ScriptEngine; +import org.elasticsearch.script.ScriptModule; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Function; + +import static java.util.Collections.singleton; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.sameInstance; + +/** + * This test verifies that the _agg param is added correctly when the system property + * "es.aggregations.enable_scripted_metric_agg_param" is set to true.
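+ * <p>Expected aliasing (as spelled out by the mock scripts below): in init, map and
+ * combine scripts {@code params.get("_agg")} is the very object bound as {@code "state"},
+ * unless the request supplied an explicit {@code _agg} param, in which case the explicit
+ * object wins.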
+ */ +public class ScriptedMetricAggregatorAggStateV6CompatTests extends AggregatorTestCase { + + private static final String AGG_NAME = "scriptedMetric"; + private static final Script INIT_SCRIPT = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "initScript", Collections.emptyMap()); + private static final Script MAP_SCRIPT = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "mapScript", Collections.emptyMap()); + private static final Script COMBINE_SCRIPT = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "combineScript", + Collections.emptyMap()); + + private static final Script INIT_SCRIPT_EXPLICIT_AGG = new Script(ScriptType.INLINE, MockScriptEngine.NAME, + "initScriptExplicitAgg", Collections.emptyMap()); + private static final Script MAP_SCRIPT_EXPLICIT_AGG = new Script(ScriptType.INLINE, MockScriptEngine.NAME, + "mapScriptExplicitAgg", Collections.emptyMap()); + private static final Script COMBINE_SCRIPT_EXPLICIT_AGG = new Script(ScriptType.INLINE, MockScriptEngine.NAME, + "combineScriptExplicitAgg", Collections.emptyMap()); + private static final String EXPLICIT_AGG_OBJECT = "Explicit agg object"; + + private static final Map<String, Function<Map<String, Object>, Object>> SCRIPTS = new HashMap<>(); + + @BeforeClass + @SuppressWarnings("unchecked") + public static void initMockScripts() { + // If _agg is provided implicitly, it should be the same object as "state" from the context. + SCRIPTS.put("initScript", params -> { + Object agg = params.get("_agg"); + Object state = params.get("state"); + assertThat(agg, instanceOf(Map.class)); + assertThat(agg, sameInstance(state)); + return agg; + }); + SCRIPTS.put("mapScript", params -> { + Object agg = params.get("_agg"); + Object state = params.get("state"); + assertThat(agg, instanceOf(Map.class)); + assertThat(agg, sameInstance(state)); + return agg; + }); + SCRIPTS.put("combineScript", params -> { + Object agg = params.get("_agg"); + Object state = params.get("state"); + assertThat(agg, instanceOf(Map.class)); + assertThat(agg, sameInstance(state)); + return agg; + }); + + SCRIPTS.put("initScriptExplicitAgg", params -> { + Object agg = params.get("_agg"); + assertThat(agg, equalTo(EXPLICIT_AGG_OBJECT)); + return agg; + }); + SCRIPTS.put("mapScriptExplicitAgg", params -> { + Object agg = params.get("_agg"); + assertThat(agg, equalTo(EXPLICIT_AGG_OBJECT)); + return agg; + }); + SCRIPTS.put("combineScriptExplicitAgg", params -> { + Object agg = params.get("_agg"); + assertThat(agg, equalTo(EXPLICIT_AGG_OBJECT)); + return agg; + }); + } + + /** + * Test that the _agg param is implicitly added + */ + public void testWithImplicitAggParam() throws IOException { + try (Directory directory = newDirectory()) { + Integer numDocs = 10; + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + for (int i = 0; i < numDocs; i++) { + indexWriter.addDocument(singleton(new SortedNumericDocValuesField("number", i))); + } + } + try (IndexReader indexReader = DirectoryReader.open(directory)) { + ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME); + aggregationBuilder.initScript(INIT_SCRIPT).mapScript(MAP_SCRIPT).combineScript(COMBINE_SCRIPT); + search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder); + } + } + + assertWarnings(ScriptedMetricAggContexts.AGG_PARAM_DEPRECATION_WARNING); + } + + /** + * Test that an explicitly added _agg param is honored + */ + public void testWithExplicitAggParam() throws IOException { + try (Directory directory = newDirectory()) {
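+            // Same indexing setup as the implicit case above; this time the request
+            // carries its own "_agg" object, which the mock scripts assert is honored.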
Integer numDocs = 10; + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + for (int i = 0; i < numDocs; i++) { + indexWriter.addDocument(singleton(new SortedNumericDocValuesField("number", i))); + } + } + + Map<String, Object> aggParams = new HashMap<>(); + aggParams.put("_agg", EXPLICIT_AGG_OBJECT); + + try (IndexReader indexReader = DirectoryReader.open(directory)) { + ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME); + aggregationBuilder + .params(aggParams) + .initScript(INIT_SCRIPT_EXPLICIT_AGG) + .mapScript(MAP_SCRIPT_EXPLICIT_AGG) + .combineScript(COMBINE_SCRIPT_EXPLICIT_AGG); + search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder); + } + } + + assertWarnings(ScriptedMetricAggContexts.AGG_PARAM_DEPRECATION_WARNING); + } + + /** + * We cannot use Mockito for mocking QueryShardContext in this case because + * script-related methods (e.g. QueryShardContext#getLazyExecutableScript) + * are final and cannot be mocked + */ + @Override + protected QueryShardContext queryShardContextMock(MapperService mapperService) { + MockScriptEngine scriptEngine = new MockScriptEngine(MockScriptEngine.NAME, SCRIPTS); + Map<String, ScriptEngine> engines = Collections.singletonMap(scriptEngine.getType(), scriptEngine); + ScriptService scriptService = new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS); + return new QueryShardContext(0, mapperService.getIndexSettings(), null, null, mapperService, null, scriptService, + xContentRegistry(), writableRegistry(), null, null, System::currentTimeMillis, null); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java index 97317e794f6cb..aaf4344999fb8 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java @@ -76,53 +76,53 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase { @SuppressWarnings("unchecked") public static void initMockScripts() { SCRIPTS.put("initScript", params -> { - Map agg = (Map) params.get("_agg"); - agg.put("collector", new ArrayList()); - return agg; - }); + Map state = (Map) params.get("state"); + state.put("collector", new ArrayList()); + return state; + }); SCRIPTS.put("mapScript", params -> { - Map agg = (Map) params.get("_agg"); - ((List) agg.get("collector")).add(1); // just add 1 for each doc the script is run on - return agg; + Map state = (Map) params.get("state"); + ((List) state.get("collector")).add(1); // just add 1 for each doc the script is run on + return state; }); SCRIPTS.put("combineScript", params -> { - Map agg = (Map) params.get("_agg"); - return ((List) agg.get("collector")).stream().mapToInt(Integer::intValue).sum(); + Map state = (Map) params.get("state"); + return ((List) state.get("collector")).stream().mapToInt(Integer::intValue).sum(); }); SCRIPTS.put("initScriptScore", params -> { - Map agg = (Map) params.get("_agg"); - agg.put("collector", new ArrayList()); - return agg; - }); + Map state = (Map) params.get("state"); + state.put("collector", new ArrayList()); + return state; + }); SCRIPTS.put("mapScriptScore", params -> { - Map agg = (Map) params.get("_agg"); - ((List) agg.get("collector")).add(((Number)
params.get("_score")).doubleValue()); - return agg; + Map state = (Map) params.get("state"); + ((List) state.get("collector")).add(((Number) params.get("_score")).doubleValue()); + return state; }); SCRIPTS.put("combineScriptScore", params -> { - Map agg = (Map) params.get("_agg"); - return ((List) agg.get("collector")).stream().mapToDouble(Double::doubleValue).sum(); + Map state = (Map) params.get("state"); + return ((List) state.get("collector")).stream().mapToDouble(Double::doubleValue).sum(); }); SCRIPTS.put("initScriptParams", params -> { - Map agg = (Map) params.get("_agg"); + Map state = (Map) params.get("state"); Integer initialValue = (Integer)params.get("initialValue"); ArrayList collector = new ArrayList(); collector.add(initialValue); - agg.put("collector", collector); - return agg; + state.put("collector", collector); + return state; }); SCRIPTS.put("mapScriptParams", params -> { - Map agg = (Map) params.get("_agg"); + Map state = (Map) params.get("state"); Integer itemValue = (Integer) params.get("itemValue"); - ((List) agg.get("collector")).add(itemValue); - return agg; + ((List) state.get("collector")).add(itemValue); + return state; }); SCRIPTS.put("combineScriptParams", params -> { - Map agg = (Map) params.get("_agg"); + Map state = (Map) params.get("state"); int divisor = ((Integer) params.get("divisor")); - return ((List) agg.get("collector")).stream().mapToInt(Integer::intValue).map(i -> i / divisor).sum(); + return ((List) state.get("collector")).stream().mapToInt(Integer::intValue).map(i -> i / divisor).sum(); }); } @@ -144,7 +144,7 @@ public void testNoDocs() throws IOException { } /** - * without combine script, the "_aggs" map should contain a list of the size of the number of documents matched + * without combine script, the "states" map should contain a list of the size of the number of documents matched */ @SuppressWarnings("unchecked") public void testScriptedMetricWithoutCombine() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java index 95da15e838c31..e607483185953 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java @@ -597,10 +597,10 @@ private static void setRandomCommonOptions(AbstractHighlighterBuilder highlightB value = randomAlphaOfLengthBetween(1, 10); break; case 1: - value = new Integer(randomInt(1000)); + value = Integer.valueOf(randomInt(1000)); break; case 2: - value = new Boolean(randomBoolean()); + value = Boolean.valueOf(randomBoolean()); break; } options.put(randomAlphaOfLengthBetween(1, 10), value); diff --git a/server/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java b/server/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java index 89f1b2ae28691..fcb4de5e04a8b 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java @@ -581,15 +581,24 @@ public void testGeoNeighbours() throws Exception { } public void testGeoField() throws Exception { -// Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_5_0_0_alpha5); -// Settings settings = 
Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder mapping = jsonBuilder(); mapping.startObject(); mapping.startObject(TYPE); mapping.startObject("properties"); + mapping.startObject("location"); + mapping.startObject("properties"); mapping.startObject("pin"); mapping.field("type", "geo_point"); + // Enable store and disable indexing sometimes + if (randomBoolean()) { + mapping.field("store", "true"); + } + if (randomBoolean()) { + mapping.field("index", "false"); + } + mapping.endObject(); // pin mapping.endObject(); + mapping.endObject(); // location mapping.startObject(FIELD); mapping.field("type", "completion"); mapping.field("analyzer", "simple"); @@ -598,7 +607,7 @@ public void testGeoField() throws Exception { mapping.startObject(); mapping.field("name", "st"); mapping.field("type", "geo"); - mapping.field("path", "pin"); + mapping.field("path", "location.pin"); mapping.field("precision", 5); mapping.endObject(); mapping.endArray(); @@ -612,7 +621,9 @@ public void testGeoField() throws Exception { XContentBuilder source1 = jsonBuilder() .startObject() + .startObject("location") .latlon("pin", 52.529172, 13.407333) + .endObject() .startObject(FIELD) .array("input", "Hotel Amsterdam in Berlin") .endObject() @@ -621,7 +632,9 @@ public void testGeoField() throws Exception { XContentBuilder source2 = jsonBuilder() .startObject() + .startObject("location") .latlon("pin", 52.363389, 4.888695) + .endObject() .startObject(FIELD) .array("input", "Hotel Berlin in Amsterdam") .endObject() @@ -693,6 +706,7 @@ public void assertSuggestions(String suggestionName, SuggestionBuilder suggestBu private void createIndexAndMapping(CompletionMappingBuilder completionMappingBuilder) throws IOException { createIndexAndMappingAndSettings(Settings.EMPTY, completionMappingBuilder); } + private void createIndexAndMappingAndSettings(Settings settings, CompletionMappingBuilder completionMappingBuilder) throws IOException { XContentBuilder mapping = jsonBuilder().startObject() .startObject(TYPE).startObject("properties") diff --git a/server/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java b/server/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java index 260d919fd42ca..fa6dc59241dd0 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java @@ -203,6 +203,67 @@ public void testIndexingWithMultipleContexts() throws Exception { assertContextSuggestFields(fields, 3); } + public void testMalformedGeoField() throws Exception { + XContentBuilder mapping = jsonBuilder(); + mapping.startObject(); + mapping.startObject("type1"); + mapping.startObject("properties"); + String type = randomFrom("text", "keyword", "long", null); + if (type != null) { + mapping.startObject("pin"); + mapping.field("type", type); + mapping.endObject(); + } + mapping.startObject("suggestion"); + mapping.field("type", "completion"); + mapping.field("analyzer", "simple"); + + mapping.startArray("contexts"); + mapping.startObject(); + mapping.field("name", "st"); + mapping.field("type", "geo"); + mapping.field("path", "pin"); + mapping.field("precision", 5); + mapping.endObject(); + mapping.endArray(); + + mapping.endObject(); + + mapping.endObject(); + mapping.endObject(); + mapping.endObject(); + + MapperService mapperService = createIndex("test", Settings.EMPTY, "type1", 
mapping).mapperService(); + XContentBuilder builder = jsonBuilder().startObject(); + if (type != null) { + switch (type) { + case "keyword": + case "text": + builder.field("pin", "52.529172, 13.407333"); + break; + case "long": + builder.field("pin", 1234); + break; + case "object": + builder.latlon("pin", 52.529172, 13.407333); + break; + } + } else { + builder.field("pin", "52.529172, 13.407333"); + } + + builder.startObject("suggestion") + .array("input", "Hotel Amsterdam in Berlin") + .endObject() + .endObject(); + + ParsedDocument parsedDocument = mapperService.documentMapper("type1").parse( + SourceToParse.source("test", "type1", "1", BytesReference.bytes(builder), XContentType.JSON)); + IndexableField[] fields = parsedDocument.rootDoc().getFields("suggestion"); + // Make sure that in 6.x all these cases are still parsed + assertContextSuggestFields(fields, 1); + } + public void testParsingQueryContextBasic() throws Exception { XContentBuilder builder = jsonBuilder().value("ezs42e44yx96"); XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); diff --git a/server/src/test/java/org/elasticsearch/transport/ConnectionManagerTests.java b/server/src/test/java/org/elasticsearch/transport/ConnectionManagerTests.java new file mode 100644 index 0000000000000..3c099c32bde2d --- /dev/null +++ b/server/src/test/java/org/elasticsearch/transport/ConnectionManagerTests.java @@ -0,0 +1,232 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.net.InetAddress; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ConnectionManagerTests extends ESTestCase { + + private ConnectionManager connectionManager; + private ThreadPool threadPool; + private Transport transport; + private ConnectionProfile connectionProfile; + + @Before + public void createConnectionManager() { + Settings settings = Settings.builder() + .put("node.name", ConnectionManagerTests.class.getSimpleName()) + .build(); + threadPool = new ThreadPool(settings); + transport = mock(Transport.class); + connectionManager = new ConnectionManager(settings, transport, threadPool); + TimeValue oneSecond = new TimeValue(1000); + connectionProfile = ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG, oneSecond, oneSecond); + } + + @After + public void stopThreadPool() { + threadPool.shutdown(); + } + + public void testConnectionProfileResolve() { + final ConnectionProfile defaultProfile = ConnectionManager.buildDefaultConnectionProfile(Settings.EMPTY); + assertEquals(defaultProfile, ConnectionProfile.resolveConnectionProfile(null, defaultProfile)); + + final ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); + builder.addConnections(randomIntBetween(0, 5), TransportRequestOptions.Type.BULK); + builder.addConnections(randomIntBetween(0, 5), TransportRequestOptions.Type.RECOVERY); + builder.addConnections(randomIntBetween(0, 5), TransportRequestOptions.Type.REG); + builder.addConnections(randomIntBetween(0, 5), TransportRequestOptions.Type.STATE); + builder.addConnections(randomIntBetween(0, 5), TransportRequestOptions.Type.PING); + + final boolean connectionTimeoutSet = randomBoolean(); + if (connectionTimeoutSet) { + builder.setConnectTimeout(TimeValue.timeValueMillis(randomNonNegativeLong())); + } + final boolean connectionHandshakeSet = randomBoolean(); + if (connectionHandshakeSet) { + builder.setHandshakeTimeout(TimeValue.timeValueMillis(randomNonNegativeLong())); + } + + final ConnectionProfile profile = builder.build(); + final ConnectionProfile resolved = ConnectionProfile.resolveConnectionProfile(profile, defaultProfile); + assertNotEquals(resolved, defaultProfile); + assertThat(resolved.getNumConnections(), equalTo(profile.getNumConnections())); + assertThat(resolved.getHandles(), equalTo(profile.getHandles())); + + assertThat(resolved.getConnectTimeout(), + equalTo(connectionTimeoutSet ? profile.getConnectTimeout() : defaultProfile.getConnectTimeout())); + assertThat(resolved.getHandshakeTimeout(), + equalTo(connectionHandshakeSet ? 
profile.getHandshakeTimeout() : defaultProfile.getHandshakeTimeout())); + } + + public void testDefaultConnectionProfile() { + ConnectionProfile profile = ConnectionManager.buildDefaultConnectionProfile(Settings.EMPTY); + assertEquals(13, profile.getNumConnections()); + assertEquals(1, profile.getNumConnectionsPerType(TransportRequestOptions.Type.PING)); + assertEquals(6, profile.getNumConnectionsPerType(TransportRequestOptions.Type.REG)); + assertEquals(1, profile.getNumConnectionsPerType(TransportRequestOptions.Type.STATE)); + assertEquals(2, profile.getNumConnectionsPerType(TransportRequestOptions.Type.RECOVERY)); + assertEquals(3, profile.getNumConnectionsPerType(TransportRequestOptions.Type.BULK)); + + profile = ConnectionManager.buildDefaultConnectionProfile(Settings.builder().put("node.master", false).build()); + assertEquals(12, profile.getNumConnections()); + assertEquals(1, profile.getNumConnectionsPerType(TransportRequestOptions.Type.PING)); + assertEquals(6, profile.getNumConnectionsPerType(TransportRequestOptions.Type.REG)); + assertEquals(0, profile.getNumConnectionsPerType(TransportRequestOptions.Type.STATE)); + assertEquals(2, profile.getNumConnectionsPerType(TransportRequestOptions.Type.RECOVERY)); + assertEquals(3, profile.getNumConnectionsPerType(TransportRequestOptions.Type.BULK)); + + profile = ConnectionManager.buildDefaultConnectionProfile(Settings.builder().put("node.data", false).build()); + assertEquals(11, profile.getNumConnections()); + assertEquals(1, profile.getNumConnectionsPerType(TransportRequestOptions.Type.PING)); + assertEquals(6, profile.getNumConnectionsPerType(TransportRequestOptions.Type.REG)); + assertEquals(1, profile.getNumConnectionsPerType(TransportRequestOptions.Type.STATE)); + assertEquals(0, profile.getNumConnectionsPerType(TransportRequestOptions.Type.RECOVERY)); + assertEquals(3, profile.getNumConnectionsPerType(TransportRequestOptions.Type.BULK)); + + profile = ConnectionManager.buildDefaultConnectionProfile(Settings.builder().put("node.data", false) + .put("node.master", false).build()); + assertEquals(10, profile.getNumConnections()); + assertEquals(1, profile.getNumConnectionsPerType(TransportRequestOptions.Type.PING)); + assertEquals(6, profile.getNumConnectionsPerType(TransportRequestOptions.Type.REG)); + assertEquals(0, profile.getNumConnectionsPerType(TransportRequestOptions.Type.STATE)); + assertEquals(0, profile.getNumConnectionsPerType(TransportRequestOptions.Type.RECOVERY)); + assertEquals(3, profile.getNumConnectionsPerType(TransportRequestOptions.Type.BULK)); + } + + public void testConnectAndDisconnect() { + AtomicInteger nodeConnectedCount = new AtomicInteger(); + AtomicInteger nodeDisconnectedCount = new AtomicInteger(); + connectionManager.addListener(new TransportConnectionListener() { + @Override + public void onNodeConnected(DiscoveryNode node) { + nodeConnectedCount.incrementAndGet(); + } + + @Override + public void onNodeDisconnected(DiscoveryNode node) { + nodeDisconnectedCount.incrementAndGet(); + } + }); + + + DiscoveryNode node = new DiscoveryNode("", new TransportAddress(InetAddress.getLoopbackAddress(), 0), Version.CURRENT); + Transport.Connection connection = new TestConnect(node); + when(transport.openConnection(node, connectionProfile)).thenReturn(connection); + + assertFalse(connectionManager.nodeConnected(node)); + + AtomicReference<Transport.Connection> connectionRef = new AtomicReference<>(); + CheckedBiConsumer<Transport.Connection, ConnectionProfile, IOException> validator = (c, p) -> connectionRef.set(c); + connectionManager.connectToNode(node, connectionProfile, validator);
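+        // connectToNode hands the freshly opened connection to the validator before
+        // registering it, so connectionRef now holds the same instance that
+        // getConnection(node) returns below (testConnectFails exercises the failure path).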
+ assertFalse(connection.isClosed()); + assertTrue(connectionManager.nodeConnected(node)); + assertSame(connection, connectionManager.getConnection(node)); + assertEquals(1, connectionManager.connectedNodeCount()); + assertEquals(1, nodeConnectedCount.get()); + assertEquals(0, nodeDisconnectedCount.get()); + + if (randomBoolean()) { + connectionManager.disconnectFromNode(node); + } else { + connection.close(); + } + assertTrue(connection.isClosed()); + assertEquals(0, connectionManager.connectedNodeCount()); + assertEquals(1, nodeConnectedCount.get()); + assertEquals(1, nodeDisconnectedCount.get()); + } + + public void testConnectFails() { + AtomicInteger nodeConnectedCount = new AtomicInteger(); + AtomicInteger nodeDisconnectedCount = new AtomicInteger(); + connectionManager.addListener(new TransportConnectionListener() { + @Override + public void onNodeConnected(DiscoveryNode node) { + nodeConnectedCount.incrementAndGet(); + } + + @Override + public void onNodeDisconnected(DiscoveryNode node) { + nodeDisconnectedCount.incrementAndGet(); + } + }); + + + DiscoveryNode node = new DiscoveryNode("", new TransportAddress(InetAddress.getLoopbackAddress(), 0), Version.CURRENT); + Transport.Connection connection = new TestConnect(node); + when(transport.openConnection(node, connectionProfile)).thenReturn(connection); + + assertFalse(connectionManager.nodeConnected(node)); + + CheckedBiConsumer validator = (c, p) -> { + throw new ConnectTransportException(node, ""); + }; + + expectThrows(ConnectTransportException.class, () -> connectionManager.connectToNode(node, connectionProfile, validator)); + + assertTrue(connection.isClosed()); + assertFalse(connectionManager.nodeConnected(node)); + expectThrows(NodeNotConnectedException.class, () -> connectionManager.getConnection(node)); + assertEquals(0, connectionManager.connectedNodeCount()); + assertEquals(0, nodeConnectedCount.get()); + assertEquals(0, nodeDisconnectedCount.get()); + } + + private static class TestConnect extends CloseableConnection { + + private final DiscoveryNode node; + + private TestConnect(DiscoveryNode node) { + this.node = node; + } + + @Override + public DiscoveryNode getNode() { + return node; + } + + @Override + public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) + throws TransportException { + + } + } +} diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index f124d765bbf23..f3b52f9a4bde5 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.transport; +import java.util.function.Supplier; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.Build; import org.elasticsearch.Version; @@ -112,11 +113,11 @@ public static MockTransportService startTransport(String id, List } public static MockTransportService startTransport( - final String id, - final List knownNodes, - final Version version, - final ThreadPool threadPool, - final Settings settings) { + final String id, + final List knownNodes, + final Version version, + final ThreadPool threadPool, + final Settings settings) { boolean success = false; final Settings s = Settings.builder().put(settings).put("node.name", id).build(); ClusterName clusterName = 
ClusterName.CLUSTER_NAME_SETTING.get(s); @@ -165,8 +166,8 @@ public void testLocalProfileIsUsedForLocalCluster() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { - updateSeedNodes(connection, Arrays.asList(seedNode)); + Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); assertTrue(connection.assertNoRunningConnections()); @@ -205,8 +206,8 @@ public void testRemoteProfileIsUsedForRemoteCluster() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { - updateSeedNodes(connection, Arrays.asList(seedNode)); + Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); assertTrue(connection.assertNoRunningConnections()); @@ -261,8 +262,8 @@ public void testDiscoverSingleNode() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { - updateSeedNodes(connection, Arrays.asList(seedNode)); + Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); assertTrue(connection.assertNoRunningConnections()); @@ -283,7 +284,7 @@ public void testDiscoverSingleNodeWithIncompatibleSeed() throws Exception { knownNodes.add(discoverableTransport.getLocalDiscoNode()); knownNodes.add(incompatibleTransport.getLocalDiscoNode()); Collections.shuffle(knownNodes, random()); - List seedNodes = Arrays.asList(incompatibleSeedNode, seedNode); + List> seedNodes = Arrays.asList(() -> incompatibleSeedNode, () -> seedNode); Collections.shuffle(seedNodes, random()); try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { @@ -317,8 +318,8 @@ public void testNodeDisconnected() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { - updateSeedNodes(connection, Arrays.asList(seedNode)); + Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); assertFalse(service.nodeConnected(spareNode)); @@ -366,8 +367,8 @@ public void testFilterDiscoveredNodes() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> n.equals(rejectedNode) == false)) { - 
updateSeedNodes(connection, Arrays.asList(seedNode)); + Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> n.equals(rejectedNode) == false)) { + updateSeedNodes(connection, Arrays.asList(() -> seedNode)); if (rejectedNode.equals(seedNode)) { assertFalse(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); @@ -381,7 +382,7 @@ public void testFilterDiscoveredNodes() throws Exception { } } - private void updateSeedNodes(RemoteClusterConnection connection, List seedNodes) throws Exception { + private void updateSeedNodes(RemoteClusterConnection connection, List> seedNodes) throws Exception { CountDownLatch latch = new CountDownLatch(1); AtomicReference exceptionAtomicReference = new AtomicReference<>(); ActionListener listener = ActionListener.wrap(x -> latch.countDown(), x -> { @@ -405,8 +406,8 @@ public void testConnectWithIncompatibleTransports() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { - expectThrows(Exception.class, () -> updateSeedNodes(connection, Arrays.asList(seedNode))); + Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + expectThrows(Exception.class, () -> updateSeedNodes(connection, Arrays.asList(() -> seedNode))); assertFalse(service.nodeConnected(seedNode)); assertTrue(connection.assertNoRunningConnections()); } @@ -443,23 +444,32 @@ public void sendRequest(long requestId, String action, TransportRequest request, } @Override - public void close() throws IOException { + public void addCloseListener(ActionListener listener) { // no-op } - }; - service.addDelegate(seedNode.getAddress(), new MockTransportService.DelegateTransport(service.getOriginalTransport()) { + @Override - public Connection getConnection(DiscoveryNode node) { - if (node == seedNode) { - return seedConnection; - } - return super.getConnection(node); + public boolean isClosed() { + return false; + } + + @Override + public void close() { + // no-op + } + }; + + service.addGetConnectionBehavior(seedNode.getAddress(), (connectionManager, discoveryNode) -> { + if (discoveryNode == seedNode) { + return seedConnection; } + return connectionManager.getConnection(discoveryNode); }); + service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { connection.addConnectedNode(seedNode); for (DiscoveryNode node : knownNodes) { final Transport.Connection transportConnection = connection.getConnection(node); @@ -502,7 +512,7 @@ public void run() { CountDownLatch listenerCalled = new CountDownLatch(1); AtomicReference exceptionReference = new AtomicReference<>(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { ActionListener listener = ActionListener.wrap(x -> { listenerCalled.countDown(); fail("expected exception"); @@ -510,7 +520,7 @@ public void run() { exceptionReference.set(x); listenerCalled.countDown(); }); - connection.updateSeedNodes(Arrays.asList(seedNode), listener); + connection.updateSeedNodes(Arrays.asList(() -> seedNode), 
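// The updateSeedNodes helper above turns the async ActionListener callback into a
// synchronous call with a CountDownLatch plus an AtomicReference for the failure.
// A generic sketch of that bridge, with the listener modeled as a plain callback:
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;

class AwaitDemo {
    // Blocks until the async operation signals completion, then rethrows any failure
    // on the calling thread - the same shape as the test helper above.
    static void awaitAsync(Consumer<Consumer<Exception>> asyncOp) throws Exception {
        CountDownLatch latch = new CountDownLatch(1);
        AtomicReference<Exception> failure = new AtomicReference<>();
        asyncOp.accept(e -> {
            failure.set(e);      // null on success, the exception on failure
            latch.countDown();   // wake the waiting thread either way
        });
        latch.await();
        if (failure.get() != null) {
            throw failure.get();
        }
    }

    public static void main(String[] args) throws Exception {
        // an operation that completes successfully on another thread
        awaitAsync(done -> new Thread(() -> done.accept(null)).start());
        System.out.println("completed");
    }
}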
listener); acceptedLatch.await(); connection.close(); // now close it, this should trigger an interrupt on the socket and we can move on assertTrue(connection.assertNoRunningConnections()); @@ -518,7 +528,9 @@ public void run() { closeRemote.countDown(); listenerCalled.await(); assertNotNull(exceptionReference.get()); - expectThrows(CancellableThreads.ExecutionCancelledException.class, () -> {throw exceptionReference.get();}); + expectThrows(CancellableThreads.ExecutionCancelledException.class, () -> { + throw exceptionReference.get(); + }); } } @@ -535,7 +547,7 @@ public void testFetchShards() throws Exception { try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { service.start(); service.acceptIncomingRequests(); - List nodes = Collections.singletonList(seedNode); + List> nodes = Collections.singletonList(() -> seedNode); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", nodes, service, Integer.MAX_VALUE, n -> true)) { if (randomBoolean()) { @@ -552,7 +564,7 @@ public void testFetchShards() throws Exception { .indicesOptions(request.indicesOptions()).local(true).preference(request.preference()) .routing(request.routing()); connection.fetchSearchShards(searchShardsRequest, - new LatchedActionListener<>(ActionListener.wrap(reference::set, failReference::set), responseLatch)); + new LatchedActionListener<>(ActionListener.wrap(reference::set, failReference::set), responseLatch)); responseLatch.await(); assertNull(failReference.get()); assertNotNull(reference.get()); @@ -575,7 +587,7 @@ public void testFetchShardsThreadContextHeader() throws Exception { try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { service.start(); service.acceptIncomingRequests(); - List nodes = Collections.singletonList(seedNode); + List> nodes = Collections.singletonList(() -> seedNode); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", nodes, service, Integer.MAX_VALUE, n -> true)) { SearchRequest request = new SearchRequest("test-index"); @@ -631,18 +643,18 @@ public void testFetchShardsSkipUnavailable() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Collections.singletonList(seedNode), service, Integer.MAX_VALUE, n -> true)) { + Collections.singletonList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { SearchRequest request = new SearchRequest("test-index"); ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest("test-index") - .indicesOptions(request.indicesOptions()).local(true).preference(request.preference()) - .routing(request.routing()); + .indicesOptions(request.indicesOptions()).local(true).preference(request.preference()) + .routing(request.routing()); { CountDownLatch responseLatch = new CountDownLatch(1); AtomicReference reference = new AtomicReference<>(); AtomicReference failReference = new AtomicReference<>(); connection.fetchSearchShards(searchShardsRequest, - new LatchedActionListener<>(ActionListener.wrap(reference::set, failReference::set), responseLatch)); + new LatchedActionListener<>(ActionListener.wrap(reference::set, failReference::set), responseLatch)); assertTrue(responseLatch.await(5, TimeUnit.SECONDS)); assertNull(failReference.get()); assertNotNull(reference.get()); @@ -671,7 
+683,9 @@ public void onNodeDisconnected(DiscoveryNode node) { AtomicReference reference = new AtomicReference<>(); AtomicReference failReference = new AtomicReference<>(); connection.fetchSearchShards(searchShardsRequest, - new LatchedActionListener<>(ActionListener.wrap(reference::set, failReference::set), responseLatch)); + new LatchedActionListener<>(ActionListener.wrap((s) -> { + reference.set(s); + }, failReference::set), responseLatch)); assertTrue(responseLatch.await(1, TimeUnit.SECONDS)); assertNotNull(failReference.get()); assertNull(reference.get()); @@ -684,7 +698,7 @@ public void onNodeDisconnected(DiscoveryNode node) { AtomicReference reference = new AtomicReference<>(); AtomicReference failReference = new AtomicReference<>(); connection.fetchSearchShards(searchShardsRequest, - new LatchedActionListener<>(ActionListener.wrap(reference::set, failReference::set), responseLatch)); + new LatchedActionListener<>(ActionListener.wrap(reference::set, failReference::set), responseLatch)); assertTrue(responseLatch.await(1, TimeUnit.SECONDS)); assertNull(failReference.get()); assertNotNull(reference.get()); @@ -707,7 +721,7 @@ public void onNodeDisconnected(DiscoveryNode node) { AtomicReference reference = new AtomicReference<>(); AtomicReference failReference = new AtomicReference<>(); connection.fetchSearchShards(searchShardsRequest, - new LatchedActionListener<>(ActionListener.wrap(reference::set, failReference::set), responseLatch)); + new LatchedActionListener<>(ActionListener.wrap(reference::set, failReference::set), responseLatch)); assertTrue(responseLatch.await(1, TimeUnit.SECONDS)); assertNull(failReference.get()); assertNotNull(reference.get()); @@ -732,7 +746,7 @@ public void testTriggerUpdatesConcurrently() throws IOException, InterruptedExce knownNodes.add(discoverableTransport.getLocalDiscoNode()); knownNodes.add(seedTransport1.getLocalDiscoNode()); Collections.shuffle(knownNodes, random()); - List seedNodes = Arrays.asList(seedNode1, seedNode); + List> seedNodes = Arrays.asList(() -> seedNode1, () -> seedNode); Collections.shuffle(seedNodes, random()); try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { @@ -754,27 +768,28 @@ public void run() { for (int i = 0; i < numConnectionAttempts; i++) { AtomicBoolean executed = new AtomicBoolean(false); ActionListener listener = ActionListener.wrap( - x -> { - assertTrue(executed.compareAndSet(false, true)); - latch.countDown();}, - x -> { - /* - * This can occur on a thread submitted to the thread pool while we are closing the - * remote cluster connection at the end of the test. - */ - if (x instanceof CancellableThreads.ExecutionCancelledException) { - // we should already be shutting down - assertTrue(executed.get()); - return; - } + x -> { + assertTrue(executed.compareAndSet(false, true)); + latch.countDown(); + }, + x -> { + /* + * This can occur on a thread submitted to the thread pool while we are closing the + * remote cluster connection at the end of the test. 
+ */ + if (x instanceof CancellableThreads.ExecutionCancelledException) { + // we should already be shutting down + assertTrue(executed.get()); + return; + } - assertTrue(executed.compareAndSet(false, true)); - latch.countDown(); + assertTrue(executed.compareAndSet(false, true)); + latch.countDown(); - if (!(x instanceof RejectedExecutionException)) { - throw new AssertionError(x); - } - }); + if (!(x instanceof RejectedExecutionException)) { + throw new AssertionError(x); + } + }); connection.updateSeedNodes(seedNodes, listener); } latch.await(); @@ -809,7 +824,7 @@ public void testCloseWhileConcurrentlyConnecting() throws IOException, Interrupt knownNodes.add(discoverableTransport.getLocalDiscoNode()); knownNodes.add(seedTransport1.getLocalDiscoNode()); Collections.shuffle(knownNodes, random()); - List seedNodes = Arrays.asList(seedNode1, seedNode); + List> seedNodes = Arrays.asList(() -> seedNode1, () -> seedNode); Collections.shuffle(seedNodes, random()); try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { @@ -861,7 +876,7 @@ public void run() { } else { throw new AssertionError(x); } - }); + }); try { connection.updateSeedNodes(seedNodes, listener); } catch (Exception e) { @@ -913,7 +928,7 @@ public void testGetConnectionInfo() throws Exception { knownNodes.add(transport3.getLocalDiscoNode()); knownNodes.add(transport2.getLocalDiscoNode()); Collections.shuffle(knownNodes, random()); - List seedNodes = Arrays.asList(node3, node1, node2); + List> seedNodes = Arrays.asList(() -> node3, () -> node1, () -> node2); Collections.shuffle(seedNodes, random()); try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { @@ -954,6 +969,30 @@ public void testGetConnectionInfo() throws Exception { } } + private RemoteConnectionInfo getRemoteConnectionInfo(RemoteClusterConnection connection) throws Exception { + AtomicReference statsRef = new AtomicReference<>(); + AtomicReference exceptionRef = new AtomicReference<>(); + CountDownLatch latch = new CountDownLatch(1); + connection.getConnectionInfo(new ActionListener() { + @Override + public void onResponse(RemoteConnectionInfo remoteConnectionInfo) { + statsRef.set(remoteConnectionInfo); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + exceptionRef.set(e); + latch.countDown(); + } + }); + latch.await(); + if (exceptionRef.get() != null) { + throw exceptionRef.get(); + } + return statsRef.get(); + } + public void testRemoteConnectionInfo() throws IOException { RemoteConnectionInfo stats = new RemoteConnectionInfo("test_cluster", Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)), @@ -1013,7 +1052,7 @@ private static RemoteConnectionInfo assertSerialization(RemoteConnectionInfo inf RemoteConnectionInfo remoteConnectionInfo = new RemoteConnectionInfo(in); assertEquals(info, remoteConnectionInfo); assertEquals(info.hashCode(), remoteConnectionInfo.hashCode()); - return randomBoolean() ? info : remoteConnectionInfo; + return randomBoolean() ? 
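// testTriggerUpdatesConcurrently above guards each listener with
// executed.compareAndSet(false, true) so that a callback firing twice trips an
// assertion. The same idea as a reusable run-once wrapper (illustrative, though
// Elasticsearch ships a similar utility):
import java.util.concurrent.atomic.AtomicBoolean;

class RunOnceDemo implements Runnable {
    private final AtomicBoolean executed = new AtomicBoolean(false);
    private final Runnable delegate;

    RunOnceDemo(Runnable delegate) {
        this.delegate = delegate;
    }

    @Override
    public void run() {
        // only the first caller wins the CAS; later invocations are no-ops
        if (executed.compareAndSet(false, true)) {
            delegate.run();
        }
    }

    public static void main(String[] args) {
        RunOnceDemo once = new RunOnceDemo(() -> System.out.println("ran"));
        once.run(); // prints "ran"
        once.run(); // ignored
    }
}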
info : remoteConnectionInfo; } } @@ -1078,34 +1117,10 @@ public void testRenderConnectionInfoXContent() throws IOException { "\"skip_unavailable\":false}}", Strings.toString(builder)); } - private RemoteConnectionInfo getRemoteConnectionInfo(RemoteClusterConnection connection) throws Exception { - AtomicReference statsRef = new AtomicReference<>(); - AtomicReference exceptionRef = new AtomicReference<>(); - CountDownLatch latch = new CountDownLatch(1); - connection.getConnectionInfo(new ActionListener() { - @Override - public void onResponse(RemoteConnectionInfo remoteConnectionInfo) { - statsRef.set(remoteConnectionInfo); - latch.countDown(); - } - - @Override - public void onFailure(Exception e) { - exceptionRef.set(e); - latch.countDown(); - } - }); - latch.await(); - if (exceptionRef.get() != null) { - throw exceptionRef.get(); - } - return statsRef.get(); - } - public void testEnsureConnected() throws IOException, InterruptedException { List knownNodes = new CopyOnWriteArrayList<>(); try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT); - MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes, Version.CURRENT)) { + MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes, Version.CURRENT)) { DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); DiscoveryNode discoverableNode = discoverableTransport.getLocalDiscoNode(); knownNodes.add(seedTransport.getLocalDiscoNode()); @@ -1116,7 +1131,7 @@ public void testEnsureConnected() throws IOException, InterruptedException { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { assertFalse(service.nodeConnected(seedNode)); assertFalse(service.nodeConnected(discoverableNode)); assertTrue(connection.assertNoRunningConnections()); @@ -1165,9 +1180,9 @@ public void testCollectNodes() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { + Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) { if (randomBoolean()) { - updateSeedNodes(connection, Arrays.asList(seedNode)); + updateSeedNodes(connection, Arrays.asList(() -> seedNode)); } CountDownLatch responseLatch = new CountDownLatch(1); AtomicReference> reference = new AtomicReference<>(); @@ -1199,14 +1214,14 @@ public void testConnectedNodesConcurrentAccess() throws IOException, Interrupted List discoverableTransports = new CopyOnWriteArrayList<>(); try { final int numDiscoverableNodes = randomIntBetween(5, 20); - List discoverableNodes = new ArrayList<>(numDiscoverableNodes); + List> discoverableNodes = new ArrayList<>(numDiscoverableNodes); for (int i = 0; i < numDiscoverableNodes; i++ ) { MockTransportService transportService = startTransport("discoverable_node" + i, knownNodes, Version.CURRENT); - discoverableNodes.add(transportService.getLocalDiscoNode()); + discoverableNodes.add(transportService::getLocalDiscoNode); discoverableTransports.add(transportService); } - List seedNodes = randomSubsetOf(discoverableNodes); + List> seedNodes = randomSubsetOf(discoverableNodes); Collections.shuffle(seedNodes, random()); try 
(MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { @@ -1255,7 +1270,7 @@ public void testConnectedNodesConcurrentAccess() throws IOException, Interrupted discoverableTransports.add(transportService); connection.addConnectedNode(transportService.getLocalDiscoNode()); } else { - DiscoveryNode node = randomFrom(discoverableNodes); + DiscoveryNode node = randomFrom(discoverableNodes).get(); connection.onNodeDisconnected(node); } } @@ -1285,12 +1300,12 @@ public void testClusterNameIsChecked() throws Exception { Settings settings = Settings.builder().put("cluster.name", "testClusterNameIsChecked").build(); try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT, threadPool, settings); - MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes, Version.CURRENT, threadPool, - settings); - MockTransportService otherClusterTransport = startTransport("other_cluster_discoverable_node", otherClusterKnownNodes, - Version.CURRENT, threadPool, Settings.builder().put("cluster.name", "otherCluster").build()); - MockTransportService otherClusterDiscoverable= startTransport("other_cluster_discoverable_node", otherClusterKnownNodes, - Version.CURRENT, threadPool, Settings.builder().put("cluster.name", "otherCluster").build())) { + MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes, Version.CURRENT, threadPool, + settings); + MockTransportService otherClusterTransport = startTransport("other_cluster_discoverable_node", otherClusterKnownNodes, + Version.CURRENT, threadPool, Settings.builder().put("cluster.name", "otherCluster").build()); + MockTransportService otherClusterDiscoverable = startTransport("other_cluster_discoverable_node", otherClusterKnownNodes, + Version.CURRENT, threadPool, Settings.builder().put("cluster.name", "otherCluster").build())) { DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); DiscoveryNode discoverableNode = discoverableTransport.getLocalDiscoNode(); knownNodes.add(seedTransport.getLocalDiscoNode()); @@ -1303,12 +1318,13 @@ public void testClusterNameIsChecked() throws Exception { service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { - updateSeedNodes(connection, Arrays.asList(seedNode)); + Arrays.asList( () -> seedNode), service, Integer.MAX_VALUE, n -> true)) { + updateSeedNodes(connection, Arrays.asList(() -> seedNode)); assertTrue(service.nodeConnected(seedNode)); assertTrue(service.nodeConnected(discoverableNode)); assertTrue(connection.assertNoRunningConnections()); - List discoveryNodes = Arrays.asList(otherClusterTransport.getLocalDiscoNode(), seedNode); + List> discoveryNodes = + Arrays.asList(() -> otherClusterTransport.getLocalDiscoNode(), () -> seedNode); Collections.shuffle(discoveryNodes, random()); updateSeedNodes(connection, discoveryNodes); assertTrue(service.nodeConnected(seedNode)); @@ -1319,7 +1335,7 @@ public void testClusterNameIsChecked() throws Exception { assertTrue(service.nodeConnected(discoverableNode)); assertTrue(connection.assertNoRunningConnections()); IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, () -> - updateSeedNodes(connection, Arrays.asList(otherClusterTransport.getLocalDiscoNode()))); + updateSeedNodes(connection, Arrays.asList(() -> 
otherClusterTransport.getLocalDiscoNode()))); assertThat(illegalStateException.getMessage(), startsWith("handshake failed, mismatched cluster name [Cluster [otherCluster]]" + " - {other_cluster_discoverable_node}")); @@ -1350,33 +1366,39 @@ public DiscoveryNode getNode() { @Override public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) - throws TransportException { + throws TransportException { // no-op } @Override - public void close() { + public void addCloseListener(ActionListener listener) { // no-op } - }; - service.addDelegate(connectedNode.getAddress(), new MockTransportService.DelegateTransport(service.getOriginalTransport()) { + @Override - public Connection getConnection(DiscoveryNode node) { - if (node == connectedNode) { - return seedConnection; - } - return super.getConnection(node); + public boolean isClosed() { + return false; } @Override - public boolean nodeConnected(DiscoveryNode node) { - return node.equals(connectedNode); + public void close() { + // no-op + } + }; + + service.addNodeConnectedBehavior(connectedNode.getAddress(), (connectionManager, discoveryNode) + -> discoveryNode.equals(connectedNode)); + + service.addGetConnectionBehavior(connectedNode.getAddress(), (connectionManager, discoveryNode) -> { + if (discoveryNode == connectedNode) { + return seedConnection; } + return connectionManager.getConnection(discoveryNode); }); service.start(); service.acceptIncomingRequests(); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - Collections.singletonList(connectedNode), service, Integer.MAX_VALUE, n -> true)) { + Collections.singletonList(() -> connectedNode), service, Integer.MAX_VALUE, n -> true)) { connection.addConnectedNode(connectedNode); for (int i = 0; i < 10; i++) { //always a direct connection as the remote node is already connected @@ -1399,4 +1421,34 @@ public boolean nodeConnected(DiscoveryNode node) { } } } + + public void testLazyResolveTransportAddress() throws Exception { + List knownNodes = new CopyOnWriteArrayList<>(); + try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT); + MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes, Version.CURRENT)) { + DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); + knownNodes.add(seedTransport.getLocalDiscoNode()); + knownNodes.add(discoverableTransport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + service.start(); + service.acceptIncomingRequests(); + CountDownLatch multipleResolveLatch = new CountDownLatch(2); + Supplier seedSupplier = () -> { + multipleResolveLatch.countDown(); + return seedNode; + }; + try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", + Arrays.asList(seedSupplier), service, Integer.MAX_VALUE, n -> true)) { + updateSeedNodes(connection, Arrays.asList(seedSupplier)); + // Closing connections leads to RemoteClusterConnection.ConnectHandler.collectRemoteNodes + // being called again so we try to resolve the same seed node's host twice + discoverableTransport.close(); + seedTransport.close(); + assertTrue(multipleResolveLatch.await(30L, TimeUnit.SECONDS)); + } + } + } + } } diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java 
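// testLazyResolveTransportAddress above pins down the point of switching seed nodes
// from DiscoveryNode to Supplier<DiscoveryNode>: the address is re-resolved on every
// connection attempt instead of once at configuration time. A sketch of the
// difference (a fake, changing "DNS" result stands in for address resolution):
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;

class LazySeedDemo {
    public static void main(String[] args) {
        AtomicInteger resolutions = new AtomicInteger();

        // Lazy: each get() resolves afresh, so a seed whose address changed
        // between attempts is picked up on reconnect.
        Supplier<String> lazySeed = () -> "10.0.0." + resolutions.incrementAndGet();

        // Eager: resolved once, stale forever.
        String eagerSeed = lazySeed.get();    // 10.0.0.1

        System.out.println(lazySeed.get());   // 10.0.0.2 - re-resolved
        System.out.println(lazySeed.get());   // 10.0.0.3 - re-resolved again
        System.out.println(eagerSeed);        // still 10.0.0.1
        System.out.println("resolutions: " + resolutions.get()); // 3, like the latch count
    }
}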
b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java
index 03d76b5a953c6..c94b1cbdef547 100644
--- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java
@@ -18,6 +18,7 @@
  */
 package org.elasticsearch.transport;

+import java.util.function.Supplier;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.LatchedActionListener;
@@ -103,10 +104,19 @@ public void testRemoteClusterSeedSetting() {
             .put("search.remote.foo.seeds", "192.168.0.1").build();
         expectThrows(IllegalArgumentException.class, () ->
             RemoteClusterAware.REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(brokenSettings).forEach(setting -> setting.get(brokenSettings)));
+
+        Settings brokenPortSettings = Settings.builder()
+            .put("search.remote.foo.seeds", "192.168.0.1:123456789123456789").build();
+        Exception e = expectThrows(
+            IllegalArgumentException.class,
+            () -> RemoteClusterAware.REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(brokenPortSettings)
+                .forEach(setting -> setting.get(brokenPortSettings))
+        );
+        assertEquals("failed to parse port", e.getMessage());
     }

     public void testBuiltRemoteClustersSeeds() throws Exception {
-        Map<String, List<DiscoveryNode>> map = RemoteClusterService.buildRemoteClustersSeeds(
+        Map<String, List<Supplier<DiscoveryNode>>> map = RemoteClusterService.buildRemoteClustersSeeds(
             Settings.builder().put("search.remote.foo.seeds", "192.168.0.1:8080").put("search.remote.bar.seeds", "[::1]:9090").build());
         assertEquals(2, map.size());
         assertTrue(map.containsKey("foo"));
@@ -114,13 +124,13 @@ public void testBuiltRemoteClustersSeeds() throws Exception {
         assertEquals(1, map.get("foo").size());
         assertEquals(1, map.get("bar").size());

-        DiscoveryNode foo = map.get("foo").get(0);
+        DiscoveryNode foo = map.get("foo").get(0).get();
         assertEquals(foo.getAddress(), new TransportAddress(new InetSocketAddress(InetAddress.getByName("192.168.0.1"), 8080)));
         assertEquals(foo.getId(), "foo#192.168.0.1:8080");
         assertEquals(foo.getVersion(), Version.CURRENT.minimumCompatibilityVersion());

-        DiscoveryNode bar = map.get("bar").get(0);
+        DiscoveryNode bar = map.get("bar").get(0).get();
         assertEquals(bar.getAddress(), new TransportAddress(new InetSocketAddress(InetAddress.getByName("[::1]"), 9090)));
         assertEquals(bar.getId(), "bar#[::1]:9090");
         assertEquals(bar.getVersion(), Version.CURRENT.minimumCompatibilityVersion());
@@ -194,10 +204,10 @@ public void testIncrementallyAddClusters() throws IOException {
         assertFalse(service.isCrossClusterSearchEnabled());
         service.initializeRemoteClusters();
         assertFalse(service.isCrossClusterSearchEnabled());
-        service.updateRemoteCluster("cluster_1", Collections.singletonList(seedNode.getAddress().address()));
+        service.updateRemoteCluster("cluster_1", Collections.singletonList(seedNode.getAddress().toString()));
         assertTrue(service.isCrossClusterSearchEnabled());
         assertTrue(service.isRemoteClusterRegistered("cluster_1"));
-        service.updateRemoteCluster("cluster_2", Collections.singletonList(otherSeedNode.getAddress().address()));
+        service.updateRemoteCluster("cluster_2", Collections.singletonList(otherSeedNode.getAddress().toString()));
         assertTrue(service.isCrossClusterSearchEnabled());
         assertTrue(service.isRemoteClusterRegistered("cluster_1"));
         assertTrue(service.isRemoteClusterRegistered("cluster_2"));
@@ -252,22 +262,17 @@ public void testRemoteNodeAttribute() throws IOException, InterruptedException {
         service.initializeRemoteClusters();
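// The new assertion above expects "failed to parse port" when the configured seed
// "192.168.0.1:123456789123456789" carries a port that overflows an int. A sketch
// of that host:port validation; the method name is illustrative, but the error
// message matches the assertion:
final class SeedPortDemo {
    static int parsePort(String hostAndPort) {
        int colon = hostAndPort.lastIndexOf(':');
        if (colon < 0) {
            throw new IllegalArgumentException("failed to parse port");
        }
        try {
            int port = Integer.parseInt(hostAndPort.substring(colon + 1));
            if (port <= 0 || port > 65535) {
                // numeric but outside the valid TCP port range
                throw new IllegalArgumentException("failed to parse port");
            }
            return port;
        } catch (NumberFormatException e) {
            // "123456789123456789" overflows int and lands here
            throw new IllegalArgumentException("failed to parse port");
        }
    }

    public static void main(String[] args) {
        System.out.println(parsePort("192.168.0.1:8080")); // 8080
        try {
            parsePort("192.168.0.1:123456789123456789");
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // failed to parse port
        }
    }
}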
assertFalse(service.isCrossClusterSearchEnabled()); - final InetSocketAddress c1N1Address = c1N1Node.getAddress().address(); - final InetSocketAddress c1N2Address = c1N2Node.getAddress().address(); - final InetSocketAddress c2N1Address = c2N1Node.getAddress().address(); - final InetSocketAddress c2N2Address = c2N2Node.getAddress().address(); - final CountDownLatch firstLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_1", - Arrays.asList(c1N1Address, c1N2Address), + Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString()), connectionListener(firstLatch)); firstLatch.await(); final CountDownLatch secondLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_2", - Arrays.asList(c2N1Address, c2N2Address), + Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString()), connectionListener(secondLatch)); secondLatch.await(); @@ -321,22 +326,17 @@ public void testRemoteNodeRoles() throws IOException, InterruptedException { service.initializeRemoteClusters(); assertFalse(service.isCrossClusterSearchEnabled()); - final InetSocketAddress c1N1Address = c1N1Node.getAddress().address(); - final InetSocketAddress c1N2Address = c1N2Node.getAddress().address(); - final InetSocketAddress c2N1Address = c2N1Node.getAddress().address(); - final InetSocketAddress c2N2Address = c2N2Node.getAddress().address(); - final CountDownLatch firstLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_1", - Arrays.asList(c1N1Address, c1N2Address), + Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString()), connectionListener(firstLatch)); firstLatch.await(); final CountDownLatch secondLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_2", - Arrays.asList(c2N1Address, c2N2Address), + Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString()), connectionListener(secondLatch)); secondLatch.await(); @@ -398,22 +398,17 @@ public void testCollectNodes() throws InterruptedException, IOException { service.initializeRemoteClusters(); assertFalse(service.isCrossClusterSearchEnabled()); - final InetSocketAddress c1N1Address = c1N1Node.getAddress().address(); - final InetSocketAddress c1N2Address = c1N2Node.getAddress().address(); - final InetSocketAddress c2N1Address = c2N1Node.getAddress().address(); - final InetSocketAddress c2N2Address = c2N2Node.getAddress().address(); - final CountDownLatch firstLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_1", - Arrays.asList(c1N1Address, c1N2Address), + Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString()), connectionListener(firstLatch)); firstLatch.await(); final CountDownLatch secondLatch = new CountDownLatch(1); service.updateRemoteCluster( "cluster_2", - Arrays.asList(c2N1Address, c2N2Address), + Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString()), connectionListener(secondLatch)); secondLatch.await(); CountDownLatch latch = new CountDownLatch(1); diff --git a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java index e361dc7894308..1e5bf02e66626 100644 --- a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java @@ -192,7 +192,11 @@ protected FakeChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeo } @Override - public NodeChannels 
getConnection(DiscoveryNode node) { + protected void stopInternal() { + } + + @Override + public NodeChannels openConnection(DiscoveryNode node, ConnectionProfile connectionProfile) { int numConnections = MockTcpTransport.LIGHT_PROFILE.getNumConnections(); ArrayList fakeChannels = new ArrayList<>(numConnections); for (int i = 0; i < numConnections; ++i) { @@ -203,7 +207,7 @@ public NodeChannels getConnection(DiscoveryNode node) { }; DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); - Transport.Connection connection = transport.getConnection(node); + Transport.Connection connection = transport.openConnection(node, null); connection.sendRequest(42, "foobar", request, TransportRequestOptions.EMPTY); BytesReference reference = messageCaptor.get(); @@ -288,71 +292,4 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(value); } } - - public void testConnectionProfileResolve() { - final ConnectionProfile defaultProfile = TcpTransport.buildDefaultConnectionProfile(Settings.EMPTY); - assertEquals(defaultProfile, TcpTransport.resolveConnectionProfile(null, defaultProfile)); - - final ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); - builder.addConnections(randomIntBetween(0, 5), TransportRequestOptions.Type.BULK); - builder.addConnections(randomIntBetween(0, 5), TransportRequestOptions.Type.RECOVERY); - builder.addConnections(randomIntBetween(0, 5), TransportRequestOptions.Type.REG); - builder.addConnections(randomIntBetween(0, 5), TransportRequestOptions.Type.STATE); - builder.addConnections(randomIntBetween(0, 5), TransportRequestOptions.Type.PING); - - final boolean connectionTimeoutSet = randomBoolean(); - if (connectionTimeoutSet) { - builder.setConnectTimeout(TimeValue.timeValueMillis(randomNonNegativeLong())); - } - final boolean connectionHandshakeSet = randomBoolean(); - if (connectionHandshakeSet) { - builder.setHandshakeTimeout(TimeValue.timeValueMillis(randomNonNegativeLong())); - } - - final ConnectionProfile profile = builder.build(); - final ConnectionProfile resolved = TcpTransport.resolveConnectionProfile(profile, defaultProfile); - assertNotEquals(resolved, defaultProfile); - assertThat(resolved.getNumConnections(), equalTo(profile.getNumConnections())); - assertThat(resolved.getHandles(), equalTo(profile.getHandles())); - - assertThat(resolved.getConnectTimeout(), - equalTo(connectionTimeoutSet ? profile.getConnectTimeout() : defaultProfile.getConnectTimeout())); - assertThat(resolved.getHandshakeTimeout(), - equalTo(connectionHandshakeSet ? 
profile.getHandshakeTimeout() : defaultProfile.getHandshakeTimeout())); - } - - public void testDefaultConnectionProfile() { - ConnectionProfile profile = TcpTransport.buildDefaultConnectionProfile(Settings.EMPTY); - assertEquals(13, profile.getNumConnections()); - assertEquals(1, profile.getNumConnectionsPerType(TransportRequestOptions.Type.PING)); - assertEquals(6, profile.getNumConnectionsPerType(TransportRequestOptions.Type.REG)); - assertEquals(1, profile.getNumConnectionsPerType(TransportRequestOptions.Type.STATE)); - assertEquals(2, profile.getNumConnectionsPerType(TransportRequestOptions.Type.RECOVERY)); - assertEquals(3, profile.getNumConnectionsPerType(TransportRequestOptions.Type.BULK)); - - profile = TcpTransport.buildDefaultConnectionProfile(Settings.builder().put("node.master", false).build()); - assertEquals(12, profile.getNumConnections()); - assertEquals(1, profile.getNumConnectionsPerType(TransportRequestOptions.Type.PING)); - assertEquals(6, profile.getNumConnectionsPerType(TransportRequestOptions.Type.REG)); - assertEquals(0, profile.getNumConnectionsPerType(TransportRequestOptions.Type.STATE)); - assertEquals(2, profile.getNumConnectionsPerType(TransportRequestOptions.Type.RECOVERY)); - assertEquals(3, profile.getNumConnectionsPerType(TransportRequestOptions.Type.BULK)); - - profile = TcpTransport.buildDefaultConnectionProfile(Settings.builder().put("node.data", false).build()); - assertEquals(11, profile.getNumConnections()); - assertEquals(1, profile.getNumConnectionsPerType(TransportRequestOptions.Type.PING)); - assertEquals(6, profile.getNumConnectionsPerType(TransportRequestOptions.Type.REG)); - assertEquals(1, profile.getNumConnectionsPerType(TransportRequestOptions.Type.STATE)); - assertEquals(0, profile.getNumConnectionsPerType(TransportRequestOptions.Type.RECOVERY)); - assertEquals(3, profile.getNumConnectionsPerType(TransportRequestOptions.Type.BULK)); - - profile = TcpTransport.buildDefaultConnectionProfile(Settings.builder().put("node.data", false).put("node.master", false).build()); - assertEquals(10, profile.getNumConnections()); - assertEquals(1, profile.getNumConnectionsPerType(TransportRequestOptions.Type.PING)); - assertEquals(6, profile.getNumConnectionsPerType(TransportRequestOptions.Type.REG)); - assertEquals(0, profile.getNumConnectionsPerType(TransportRequestOptions.Type.STATE)); - assertEquals(0, profile.getNumConnectionsPerType(TransportRequestOptions.Type.RECOVERY)); - assertEquals(3, profile.getNumConnectionsPerType(TransportRequestOptions.Type.BULK)); - } - } diff --git a/test/framework/src/main/java/org/elasticsearch/test/EqualsHashCodeTestUtils.java b/test/framework/src/main/java/org/elasticsearch/test/EqualsHashCodeTestUtils.java index 76cfcce033b11..44b845e5c2cac 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/EqualsHashCodeTestUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/EqualsHashCodeTestUtils.java @@ -33,7 +33,7 @@ */ public class EqualsHashCodeTestUtils { - private static Object[] someObjects = new Object[] { "some string", new Integer(1), new Double(1.0) }; + private static Object[] someObjects = new Object[] { "some string", Integer.valueOf(1), Double.valueOf(1.0) }; /** * A function that makes a copy of its input argument diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 30f559db8516b..0a5f29414d6e8 100644 --- 
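// The EqualsHashCodeTestUtils change above swaps the deprecated boxing constructors
// for the valueOf factories. Behaviorally: valueOf may return cached instances
// (guaranteed for ints in -128..127), while the constructor always allocates, which
// matters in tests that probe reference vs. value equality:
public class BoxingDemo {
    @SuppressWarnings("deprecation")
    public static void main(String[] args) {
        Integer a = Integer.valueOf(1);
        Integer b = Integer.valueOf(1);
        System.out.println(a == b);      // true: small values come from a cache

        Integer c = new Integer(1);      // deprecated: always a fresh object
        System.out.println(a == c);      // false: distinct instance

        System.out.println(a.equals(c)); // true: equals() is unaffected either way
    }
}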
a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -388,9 +388,9 @@ private Settings getRandomNodeSettings(long seed) { // randomize tcp settings if (random.nextBoolean()) { - builder.put(TcpTransport.CONNECTIONS_PER_NODE_RECOVERY.getKey(), random.nextInt(2) + 1); - builder.put(TcpTransport.CONNECTIONS_PER_NODE_BULK.getKey(), random.nextInt(3) + 1); - builder.put(TcpTransport.CONNECTIONS_PER_NODE_REG.getKey(), random.nextInt(6) + 1); + builder.put(TransportService.CONNECTIONS_PER_NODE_RECOVERY.getKey(), random.nextInt(2) + 1); + builder.put(TransportService.CONNECTIONS_PER_NODE_BULK.getKey(), random.nextInt(3) + 1); + builder.put(TransportService.CONNECTIONS_PER_NODE_REG.getKey(), random.nextInt(6) + 1); } if (random.nextBoolean()) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java index ffdf79c0636b2..a7d3e85d3009d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java @@ -20,18 +20,22 @@ package org.elasticsearch.test.transport; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.ConnectionManager; import org.elasticsearch.transport.ConnectionProfile; import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.transport.RequestHandlerRegistry; @@ -39,13 +43,14 @@ import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportConnectionListener; import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportInterceptor; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportStats; import java.io.IOException; -import java.io.UncheckedIOException; import java.net.UnknownHostException; import java.util.ArrayList; import java.util.Collection; @@ -53,9 +58,11 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.function.Function; import static 
org.apache.lucene.util.LuceneTestCase.rarely; @@ -84,6 +91,45 @@ public CapturedRequest(DiscoveryNode node, long requestId, String action, Transp private ConcurrentMap> requests = new ConcurrentHashMap<>(); private BlockingQueue capturedRequests = ConcurrentCollections.newBlockingQueue(); + public TransportService createCapturingTransportService(Settings settings, ThreadPool threadPool, TransportInterceptor interceptor, + Function localNodeFactory, + @Nullable ClusterSettings clusterSettings, Set taskHeaders) { + StubbableConnectionManager connectionManager = new StubbableConnectionManager(new ConnectionManager(settings, this, threadPool), + settings, this, threadPool); + connectionManager.setDefaultNodeConnectedBehavior((cm, discoveryNode) -> true); + connectionManager.setDefaultConnectBehavior((cm, discoveryNode) -> new Connection() { + @Override + public DiscoveryNode getNode() { + return discoveryNode; + } + + @Override + public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) + throws TransportException { + requests.put(requestId, Tuple.tuple(discoveryNode, action)); + capturedRequests.add(new CapturedRequest(discoveryNode, requestId, action, request)); + } + + @Override + public void addCloseListener(ActionListener listener) { + + } + + @Override + public boolean isClosed() { + return false; + } + + @Override + public void close() { + + } + }); + return new TransportService(settings, this, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders, + connectionManager); + + } + /** returns all requests captured so far. Doesn't clear the captured request list. See {@link #clear()} */ public CapturedRequest[] capturedRequests() { return capturedRequests.toArray(new CapturedRequest[0]); @@ -195,7 +241,7 @@ public void handleError(final long requestId, final TransportException e) { } @Override - public Connection openConnection(DiscoveryNode node, ConnectionProfile profile) throws IOException { + public Connection openConnection(DiscoveryNode node, ConnectionProfile profile) { return new Connection() { @Override public DiscoveryNode getNode() { @@ -204,13 +250,23 @@ public DiscoveryNode getNode() { @Override public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) - throws IOException, TransportException { + throws TransportException { requests.put(requestId, Tuple.tuple(node, action)); capturedRequests.add(new CapturedRequest(node, requestId, action, request)); } @Override - public void close() throws IOException { + public void addCloseListener(ActionListener listener) { + + } + + @Override + public boolean isClosed() { + return false; + } + + @Override + public void close() { } }; @@ -236,23 +292,6 @@ public TransportAddress[] addressesFromString(String address, int perAddressLimi return new TransportAddress[0]; } - @Override - public boolean nodeConnected(DiscoveryNode node) { - return true; - } - - @Override - public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfile, - CheckedBiConsumer connectionValidator) - throws ConnectTransportException { - - } - - @Override - public void disconnectFromNode(DiscoveryNode node) { - - } - @Override public Lifecycle.State lifecycleState() { return null; @@ -282,14 +321,6 @@ public List getLocalAddresses() { return Collections.emptyList(); } - public Connection getConnection(DiscoveryNode node) { - try { - return openConnection(node, null); - } catch (IOException e) { - throw new 
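// createCapturingTransportService above wires in a stub Connection whose sendRequest
// records (node, requestId, action) in an in-memory queue instead of touching the
// network. The core of that trick, reduced to plain Java with simplified types
// (Java 16+ record used for brevity):
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

final class CapturingDemo {
    record Captured(String node, long requestId, String action) {}

    private final BlockingQueue<Captured> captured = new LinkedBlockingQueue<>();

    // stands in for Connection.sendRequest: no I/O, just remember the call
    void sendRequest(String node, long requestId, String action) {
        captured.add(new Captured(node, requestId, action));
    }

    // stands in for capturedRequests(): inspect without clearing
    Captured[] capturedRequests() {
        return captured.toArray(new Captured[0]);
    }

    public static void main(String[] args) {
        CapturingDemo conn = new CapturingDemo();
        conn.sendRequest("node-1", 42L, "internal:some/action");
        for (Captured c : conn.capturedRequests()) {
            System.out.println(c); // what the code under test "sent"
        }
    }
}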
UncheckedIOException(e); - } - } - @Override public void registerRequestHandler(RequestHandlerRegistry reg) { synchronized (requestHandlerMutex) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index 7f818de29d430..15ab06d651e92 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -20,15 +20,12 @@ package org.elasticsearch.test.transport; import com.carrotsearch.randomizedtesting.SysGlobals; - import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.component.Lifecycle; -import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; @@ -40,7 +37,6 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.node.Node; import org.elasticsearch.plugins.Plugin; @@ -48,21 +44,18 @@ import org.elasticsearch.test.tasks.MockTaskManager; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.ConnectionManager; import org.elasticsearch.transport.ConnectionProfile; import org.elasticsearch.transport.MockTcpTransport; import org.elasticsearch.transport.RequestHandlerRegistry; import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportConnectionListener; -import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportInterceptor; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.transport.TransportStats; import java.io.IOException; -import java.net.UnknownHostException; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -71,18 +64,18 @@ import java.util.Map; import java.util.Queue; import java.util.Set; -import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.LinkedBlockingDeque; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; +import java.util.function.Supplier; /** - * A mock transport service that allows to simulate different network topology failures. + * A mock delegate service that allows to simulate different network topology failures. * Internally it maps TransportAddress objects to rules that inject failures. * Adding rules for a node is done by adding rules for all bound addresses of a node * (and the publish address, if different). 
- * Matching requests to rules is based on the transport address associated with the + * Matching requests to rules is based on the delegate address associated with the * discovery node of the request, namely by DiscoveryNode.getAddress(). * This address is usually the publish address of the node but can also be a different one * (for example, @see org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing, which constructs @@ -147,9 +140,15 @@ public MockTransportService(Settings settings, Transport transport, ThreadPool t public MockTransportService(Settings settings, Transport transport, ThreadPool threadPool, TransportInterceptor interceptor, Function localNodeFactory, @Nullable ClusterSettings clusterSettings, Set taskHeaders) { - super(settings, new LookupTestTransport(transport), threadPool, interceptor, localNodeFactory, clusterSettings, - taskHeaders); - this.original = transport; + this(settings, new StubbableTransport(transport), threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders); + } + + private MockTransportService(Settings settings, StubbableTransport transport, ThreadPool threadPool, TransportInterceptor interceptor, + Function localNodeFactory, + @Nullable ClusterSettings clusterSettings, Set taskHeaders) { + super(settings, transport, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders, + new StubbableConnectionManager(new ConnectionManager(settings, transport, threadPool), settings, transport, threadPool)); + this.original = transport.getDelegate(); } public static TransportAddress[] extractTransportAddresses(TransportService transportService) { @@ -173,11 +172,12 @@ protected TaskManager createTaskManager(Settings settings, ThreadPool threadPool * Clears all the registered rules. */ public void clearAllRules() { - transport().transports.clear(); + transport().clearBehaviors(); + connectionManager().clearBehaviors(); } /** - * Clears the rule associated with the provided transport service. + * Clears the rule associated with the provided delegate service. */ public void clearRule(TransportService transportService) { for (TransportAddress transportAddress : extractTransportAddresses(transportService)) { @@ -186,20 +186,11 @@ public void clearRule(TransportService transportService) { } /** - * Clears the rule associated with the provided transport address. + * Clears the rule associated with the provided delegate address. */ public void clearRule(TransportAddress transportAddress) { - Transport transport = transport().transports.remove(transportAddress); - if (transport instanceof ClearableTransport) { - ((ClearableTransport) transport).clearRule(); - } - } - - /** - * Returns the original Transport service wrapped by this mock transport service. - */ - public Transport original() { - return original; + transport().clearBehavior(transportAddress); + connectionManager().clearBehavior(transportAddress); } /** @@ -217,30 +208,14 @@ public void addFailToSendNoConnectRule(TransportService transportService) { * is added to fail as well. 
*/ public void addFailToSendNoConnectRule(TransportAddress transportAddress) { - addDelegate(transportAddress, new DelegateTransport(original) { - - @Override - public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfile, - CheckedBiConsumer connectionValidator) - throws ConnectTransportException { - if (original.nodeConnected(node) == false) { - // connecting to an already connected node is a no-op - throw new ConnectTransportException(node, "DISCONNECT: simulated"); - } - } - - @Override - public Connection openConnection(DiscoveryNode node, ConnectionProfile profile) throws IOException { - throw new ConnectTransportException(node, "DISCONNECT: simulated"); - } + transport().addConnectBehavior(transportAddress, (transport, discoveryNode, profile) -> { + throw new ConnectTransportException(discoveryNode, "DISCONNECT: simulated"); + }); - @Override - protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, - TransportRequestOptions options) throws IOException { - connection.close(); - // send the request, which will blow up - connection.sendRequest(requestId, action, request, options); - } + transport().addSendBehavior(transportAddress, (connection, requestId, action, request, options) -> { + connection.close(); + // send the request, which will blow up + connection.sendRequest(requestId, action, request, options); }); } @@ -271,18 +246,12 @@ public void addFailToSendNoConnectRule(TransportService transportService, final * Adds a rule that will cause matching operations to throw ConnectTransportExceptions */ public void addFailToSendNoConnectRule(TransportAddress transportAddress, final Set blockedActions) { - - addDelegate(transportAddress, new DelegateTransport(original) { - - @Override - protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, - TransportRequestOptions options) throws IOException { - if (blockedActions.contains(action)) { - logger.info("--> preventing {} request", action); - connection.close(); - } - connection.sendRequest(requestId, action, request, options); + transport().addSendBehavior(transportAddress, (connection, requestId, action, request, options) -> { + if (blockedActions.contains(action)) { + logger.info("--> preventing {} request", action); + connection.close(); } + connection.sendRequest(requestId, action, request, options); }); } @@ -301,28 +270,12 @@ public void addUnresponsiveRule(TransportService transportService) { * and failing to connect once the rule was added. 
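// The rules above are now registered as small lambdas keyed by transport address
// rather than as DelegateTransport subclasses. The underlying pattern is a behavior
// registry with a default fallback; names below are illustrative, not the
// StubbableTransport API itself:
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

final class BehaviorRegistryDemo {
    interface SendBehavior {
        void send(String address, String action);
    }

    private final Map<String, SendBehavior> behaviors = new ConcurrentHashMap<>();
    private final SendBehavior defaultBehavior;

    BehaviorRegistryDemo(SendBehavior defaultBehavior) {
        this.defaultBehavior = defaultBehavior;
    }

    // true if this address had no behavior before, mirroring the contract of
    // the add*Behavior methods in this file
    boolean addBehavior(String address, SendBehavior behavior) {
        return behaviors.put(address, behavior) == null;
    }

    void send(String address, String action) {
        // per-address override first, real implementation otherwise
        behaviors.getOrDefault(address, defaultBehavior).send(address, action);
    }

    public static void main(String[] args) {
        BehaviorRegistryDemo registry = new BehaviorRegistryDemo(
            (addr, action) -> System.out.println("real send " + action + " -> " + addr));
        registry.addBehavior("10.0.0.2:9300", (addr, action) -> {
            throw new IllegalStateException("DISCONNECT: simulated");
        });
        registry.send("10.0.0.1:9300", "ping"); // real send
        try {
            registry.send("10.0.0.2:9300", "ping");
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage()); // DISCONNECT: simulated
        }
    }
}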
*/ public void addUnresponsiveRule(TransportAddress transportAddress) { - addDelegate(transportAddress, new DelegateTransport(original) { - - @Override - public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfile, - CheckedBiConsumer connectionValidator) - throws ConnectTransportException { - if (original.nodeConnected(node) == false) { - // connecting to an already connected node is a no-op - throw new ConnectTransportException(node, "UNRESPONSIVE: simulated"); - } - } - - @Override - public Connection openConnection(DiscoveryNode node, ConnectionProfile profile) throws IOException { - throw new ConnectTransportException(node, "UNRESPONSIVE: simulated"); - } + transport().addConnectBehavior(transportAddress, (transport, discoveryNode, profile) -> { + throw new ConnectTransportException(discoveryNode, "UNRESPONSIVE: simulated"); + }); - @Override - protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, - TransportRequestOptions options) throws IOException { - // don't send anything, the receiving node is unresponsive - } + transport().addSendBehavior(transportAddress, (connection, requestId, action, request, options) -> { + // don't send anything, the receiving node is unresponsive }); } @@ -347,70 +300,38 @@ public void addUnresponsiveRule(TransportService transportService, final TimeVal public void addUnresponsiveRule(TransportAddress transportAddress, final TimeValue duration) { final long startTime = System.currentTimeMillis(); - addDelegate(transportAddress, new ClearableTransport(original) { - private final Queue requestsToSendWhenCleared = new LinkedBlockingDeque<>(); - private boolean cleared = false; + Supplier delaySupplier = () -> new TimeValue(duration.millis() - (System.currentTimeMillis() - startTime)); - TimeValue getDelay() { - return new TimeValue(duration.millis() - (System.currentTimeMillis() - startTime)); + transport().addConnectBehavior(transportAddress, (transport, discoveryNode, profile) -> { + TimeValue delay = delaySupplier.get(); + if (delay.millis() <= 0) { + return original.openConnection(discoveryNode, profile); } - @Override - public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfile, - CheckedBiConsumer connectionValidator) - throws ConnectTransportException { - if (original.nodeConnected(node)) { - // connecting to an already connected node is a no-op - return; - } - TimeValue delay = getDelay(); - if (delay.millis() <= 0) { - original.connectToNode(node, connectionProfile, connectionValidator); - return; - } - - // TODO: Replace with proper setting - TimeValue connectingTimeout = TcpTransport.TCP_CONNECT_TIMEOUT.getDefault(Settings.EMPTY); - try { - if (delay.millis() < connectingTimeout.millis()) { - Thread.sleep(delay.millis()); - original.connectToNode(node, connectionProfile, connectionValidator); - } else { - Thread.sleep(connectingTimeout.millis()); - throw new ConnectTransportException(node, "UNRESPONSIVE: simulated"); - } - } catch (InterruptedException e) { - throw new ConnectTransportException(node, "UNRESPONSIVE: simulated"); + // TODO: Replace with proper setting + TimeValue connectingTimeout = TransportService.TCP_CONNECT_TIMEOUT.getDefault(Settings.EMPTY); + try { + if (delay.millis() < connectingTimeout.millis()) { + Thread.sleep(delay.millis()); + return original.openConnection(discoveryNode, profile); + } else { + Thread.sleep(connectingTimeout.millis()); + throw new ConnectTransportException(discoveryNode, "UNRESPONSIVE: simulated"); } + } 
catch (InterruptedException e) { + throw new ConnectTransportException(discoveryNode, "UNRESPONSIVE: simulated"); } + }); - @Override - public Connection openConnection(DiscoveryNode node, ConnectionProfile profile) throws IOException { - TimeValue delay = getDelay(); - if (delay.millis() <= 0) { - return original.openConnection(node, profile); - } - - // TODO: Replace with proper setting - TimeValue connectingTimeout = TcpTransport.TCP_CONNECT_TIMEOUT.getDefault(Settings.EMPTY); - try { - if (delay.millis() < connectingTimeout.millis()) { - Thread.sleep(delay.millis()); - return original.openConnection(node, profile); - } else { - Thread.sleep(connectingTimeout.millis()); - throw new ConnectTransportException(node, "UNRESPONSIVE: simulated"); - } - } catch (InterruptedException e) { - throw new ConnectTransportException(node, "UNRESPONSIVE: simulated"); - } - } + transport().addSendBehavior(transportAddress, new StubbableTransport.SendRequestBehavior() { + private final Queue requestsToSendWhenCleared = new LinkedBlockingDeque<>(); + private boolean cleared = false; @Override - protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, - TransportRequestOptions options) throws IOException { + public void sendRequest(Transport.Connection connection, long requestId, String action, TransportRequest request, + TransportRequestOptions options) throws IOException { // delayed sending - even if larger then the request timeout to simulated a potential late response from target node - TimeValue delay = getDelay(); + TimeValue delay = delaySupplier.get(); if (delay.millis() <= 0) { connection.sendRequest(requestId, action, request, options); return; @@ -450,7 +371,7 @@ protected void doRun() throws IOException { } @Override - public void clearRule() { + public void clearCallback() { synchronized (this) { assert cleared == false; cleared = true; @@ -461,234 +382,128 @@ public void clearRule() { } /** - * Adds a new delegate transport that is used for communication with the given transport service. + * Adds a new send behavior that is used for communication with the given delegate service. * - * @return {@code true} iff no other delegate was registered for any of the addresses bound by transport service. + * @return {@code true} if no other send behavior was registered for any of the addresses bound by delegate service. */ - public boolean addDelegate(TransportService transportService, DelegateTransport transport) { + public boolean addSendBehavior(TransportService transportService, StubbableTransport.SendRequestBehavior sendBehavior) { boolean noRegistered = true; for (TransportAddress transportAddress : extractTransportAddresses(transportService)) { - noRegistered &= addDelegate(transportAddress, transport); + noRegistered &= addSendBehavior(transportAddress, sendBehavior); } return noRegistered; } /** - * Adds a new delegate transport that is used for communication with the given transport address. + * Adds a new send behavior that is used for communication with the given delegate address. * - * @return {@code true} iff no other delegate was registered for this address before. + * @return {@code true} if no other send behavior was registered for this address before. 
 */
-    public boolean addDelegate(TransportAddress transportAddress, DelegateTransport transport) {
-        return transport().transports.put(transportAddress, transport) == null;
-    }
-
-    private LookupTestTransport transport() {
-        return (LookupTestTransport) transport;
+    public boolean addSendBehavior(TransportAddress transportAddress, StubbableTransport.SendRequestBehavior sendBehavior) {
+        return transport().addSendBehavior(transportAddress, sendBehavior);
     }

     /**
-     * A lookup transport that has a list of potential Transport implementations to delegate to for node operations,
-     * if none is registered, then the default one is used.
+     * Adds a send behavior that is the default send behavior.
+     *
+     * @return {@code true} if no default send behavior was registered
      */
-    private static class LookupTestTransport extends DelegateTransport {
-
-        final ConcurrentMap<TransportAddress, Transport> transports = ConcurrentCollections.newConcurrentMap();
-
-        LookupTestTransport(Transport transport) {
-            super(transport);
-        }
-
-        private Transport getTransport(DiscoveryNode node) {
-            Transport transport = transports.get(node.getAddress());
-            if (transport != null) {
-                return transport;
-            }
-            return this.transport;
-        }
-
-        @Override
-        public boolean nodeConnected(DiscoveryNode node) {
-            return getTransport(node).nodeConnected(node);
-        }
-
-        @Override
-        public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfile,
-                                  CheckedBiConsumer<Connection, ConnectionProfile, IOException> connectionValidator)
-            throws ConnectTransportException {
-            getTransport(node).connectToNode(node, connectionProfile, connectionValidator);
-        }
-
-        @Override
-        public void disconnectFromNode(DiscoveryNode node) {
-            getTransport(node).disconnectFromNode(node);
-        }
+    public boolean addSendBehavior(StubbableTransport.SendRequestBehavior behavior) {
+        return transport().setDefaultSendBehavior(behavior);
+    }

-        @Override
-        public Connection getConnection(DiscoveryNode node) {
-            return getTransport(node).getConnection(node);
-        }
-        @Override
-        public Connection openConnection(DiscoveryNode node, ConnectionProfile profile) throws IOException {
-            return getTransport(node).openConnection(node, profile);
+    /**
+     * Adds a new connect behavior that is used for creating connections with the given delegate service.
+     *
+     * @return {@code true} if no other connect behavior was registered for any of the addresses bound by delegate service.
+     */
+    public boolean addConnectBehavior(TransportService transportService, StubbableTransport.OpenConnectionBehavior connectBehavior) {
+        boolean noRegistered = true;
+        for (TransportAddress transportAddress : extractTransportAddresses(transportService)) {
+            noRegistered &= addConnectBehavior(transportAddress, connectBehavior);
         }
+        return noRegistered;
     }

     /**
-     * A pure delegate transport.
-     * Can be extracted to a common class if needed in other places in the codebase.
+     * Adds a new connect behavior that is used for creating connections with the given delegate address.
+     *
+     * @return {@code true} if no other connect behavior was registered for this address before.
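+     * <p>
+     * For example, a test might simulate an address that accepts no new connections
+     * (a sketch; {@code service} and {@code address} are illustrative):
+     * <pre>{@code
+     * service.addConnectBehavior(address, (transport, discoveryNode, profile) -> {
+     *     throw new ConnectTransportException(discoveryNode, "CONNECT: simulated");
+     * });
+     * }</pre>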
*/ - public static class DelegateTransport implements Transport { - - protected final Transport transport; - - - public DelegateTransport(Transport transport) { - this.transport = transport; - } - - @Override - public void addConnectionListener(TransportConnectionListener listener) { - transport.addConnectionListener(listener); - } - - @Override - public boolean removeConnectionListener(TransportConnectionListener listener) { - return transport.removeConnectionListener(listener); - } - - @Override - public void registerRequestHandler(RequestHandlerRegistry reg) { - transport.registerRequestHandler(reg); - } - - @Override - public RequestHandlerRegistry getRequestHandler(String action) { - return transport.getRequestHandler(action); - } - - @Override - public BoundTransportAddress boundAddress() { - return transport.boundAddress(); - } - - @Override - public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException { - return transport.addressesFromString(address, perAddressLimit); - } - - @Override - public boolean nodeConnected(DiscoveryNode node) { - return transport.nodeConnected(node); - } - - @Override - public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfile, - CheckedBiConsumer connectionValidator) - throws ConnectTransportException { - transport.connectToNode(node, connectionProfile, connectionValidator); - } - - @Override - public void disconnectFromNode(DiscoveryNode node) { - transport.disconnectFromNode(node); - } - - @Override - public List getLocalAddresses() { - return transport.getLocalAddresses(); - } - - @Override - public Connection getConnection(DiscoveryNode node) { - return new FilteredConnection(transport.getConnection(node)) { - @Override - public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) - throws IOException, TransportException { - DelegateTransport.this.sendRequest(connection, requestId, action, request, options); - } - }; - } - - @Override - public Connection openConnection(DiscoveryNode node, ConnectionProfile profile) throws IOException { - return new FilteredConnection(transport.openConnection(node, profile)) { - @Override - public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) - throws IOException, TransportException { - DelegateTransport.this.sendRequest(connection, requestId, action, request, options); - } - }; - } - - @Override - public TransportStats getStats() { - return transport.getStats(); - } - - @Override - public ResponseHandlers getResponseHandlers() { - return transport.getResponseHandlers(); - } - - @Override - public Lifecycle.State lifecycleState() { - return transport.lifecycleState(); - } - - @Override - public void addLifecycleListener(LifecycleListener listener) { - transport.addLifecycleListener(listener); - } - - @Override - public void removeLifecycleListener(LifecycleListener listener) { - transport.removeLifecycleListener(listener); - } - - @Override - public void start() { - transport.start(); - } + public boolean addConnectBehavior(TransportAddress transportAddress, StubbableTransport.OpenConnectionBehavior connectBehavior) { + return transport().addConnectBehavior(transportAddress, connectBehavior); + } - @Override - public void stop() { - transport.stop(); + /** + * Adds a new get connection behavior that is used for communication with the given delegate service. 
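+     * <p>
+     * A sketch of a behavior that fails every connection lookup for the wrapped service
+     * ({@code service} and {@code otherService} are illustrative):
+     * <pre>{@code
+     * service.addGetConnectionBehavior(otherService, (connectionManager, discoveryNode) -> {
+     *     throw new ConnectTransportException(discoveryNode, "no connection: simulated");
+     * });
+     * }</pre>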
+ * + * @return {@code true} if no other get connection behavior was registered for any of the addresses bound by delegate service. + */ + public boolean addGetConnectionBehavior(TransportService transportService, StubbableConnectionManager.GetConnectionBehavior behavior) { + boolean noRegistered = true; + for (TransportAddress transportAddress : extractTransportAddresses(transportService)) { + noRegistered &= addGetConnectionBehavior(transportAddress, behavior); } + return noRegistered; + } - @Override - public void close() { - transport.close(); - } + /** + * Adds a get connection behavior that is used for communication with the given delegate address. + * + * @return {@code true} if no other get connection behavior was registered for this address before. + */ + public boolean addGetConnectionBehavior(TransportAddress transportAddress, StubbableConnectionManager.GetConnectionBehavior behavior) { + return connectionManager().addConnectBehavior(transportAddress, behavior); + } - @Override - public Map profileBoundAddresses() { - return transport.profileBoundAddresses(); - } + /** + * Adds a get connection behavior that is the default get connection behavior. + * + * @return {@code true} if no default get connection behavior was registered. + */ + public boolean addGetConnectionBehavior(StubbableConnectionManager.GetConnectionBehavior behavior) { + return connectionManager().setDefaultConnectBehavior(behavior); + } - protected void sendRequest(Transport.Connection connection, long requestId, String action, TransportRequest request, - TransportRequestOptions options) throws IOException { - connection.sendRequest(requestId, action, request, options); + /** + * Adds a node connected behavior that is used for the given delegate service. + * + * @return {@code true} if no other node connected behavior was registered for any of the addresses bound by delegate service. + */ + public boolean addNodeConnectedBehavior(TransportService transportService, StubbableConnectionManager.NodeConnectedBehavior behavior) { + boolean noRegistered = true; + for (TransportAddress transportAddress : extractTransportAddresses(transportService)) { + noRegistered &= addNodeConnectedBehavior(transportAddress, behavior); } + return noRegistered; } /** - * The delegate transport instances defined in this class mock various kinds of disruption types. This subclass adds a method - * {@link #clearRule()} so that when the disruptions are cleared (see {@link #clearRule(TransportService)}) this gives the - * disruption a possibility to run clean-up actions. + * Adds a node connected behavior that is used for the given delegate address. + * + * @return {@code true} if no other node connected behavior was registered for this address before. */ - public abstract static class ClearableTransport extends DelegateTransport { + public boolean addNodeConnectedBehavior(TransportAddress transportAddress, StubbableConnectionManager.NodeConnectedBehavior behavior) { + return connectionManager().addNodeConnectedBehavior(transportAddress, behavior); + } - public ClearableTransport(Transport transport) { - super(transport); - } + /** + * Adds a node connected behavior that is the default node connected behavior. + * + * @return {@code true} if no default node connected behavior was registered. 
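+     * <p>
+     * For example, a test might make every node appear disconnected
+     * (a sketch; {@code service} is illustrative):
+     * <pre>{@code
+     * service.addNodeConnectedBehavior((connectionManager, discoveryNode) -> false);
+     * }</pre>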
+ */ + public boolean addNodeConnectedBehavior(StubbableConnectionManager.NodeConnectedBehavior behavior) { + return connectionManager().setDefaultNodeConnectedBehavior(behavior); + } - /** - * Called by {@link #clearRule(TransportService)} - */ - public abstract void clearRule(); + public StubbableTransport transport() { + return (StubbableTransport) transport; } + public StubbableConnectionManager connectionManager() { + return (StubbableConnectionManager) connectionManager; + } List activeTracers = new CopyOnWriteArrayList<>(); @@ -766,78 +581,36 @@ protected void traceRequestSent(DiscoveryNode node, long requestId, String actio } } - private static class FilteredConnection implements Transport.Connection { - protected final Transport.Connection connection; - - private FilteredConnection(Transport.Connection connection) { - this.connection = connection; - } - - @Override - public DiscoveryNode getNode() { - return connection.getNode(); - } - - @Override - public Version getVersion() { - return connection.getVersion(); - } - - @Override - public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) - throws IOException, TransportException { - connection.sendRequest(requestId, action, request, options); - } - - @Override - public void close() throws IOException { - connection.close(); - } - - @Override - public Object getCacheKey() { - return connection.getCacheKey(); - } - } - public Transport getOriginalTransport() { Transport transport = transport(); - while (transport instanceof DelegateTransport) { - transport = ((DelegateTransport) transport).transport; + while (transport instanceof StubbableTransport) { + transport = ((StubbableTransport) transport).getDelegate(); } return transport; } @Override public Transport.Connection openConnection(DiscoveryNode node, ConnectionProfile profile) throws IOException { - FilteredConnection filteredConnection = new FilteredConnection(super.openConnection(node, profile)) { - final AtomicBoolean closed = new AtomicBoolean(false); - - @Override - public void close() throws IOException { - try { - super.close(); - } finally { - if (closed.compareAndSet(false, true)) { - synchronized (openConnections) { - List connections = openConnections.get(node); - boolean remove = connections.remove(this); - assert remove; - if (connections.isEmpty()) { - openConnections.remove(node); - } - } - } - } + Transport.Connection connection = super.openConnection(node, profile); - } - }; synchronized (openConnections) { List connections = openConnections.computeIfAbsent(node, (n) -> new CopyOnWriteArrayList<>()); - connections.add(filteredConnection); + connections.add(connection); } - return filteredConnection; + + connection.addCloseListener(ActionListener.wrap(() -> { + synchronized (openConnections) { + List connections = openConnections.get(node); + boolean remove = connections.remove(connection); + assert remove : "Should have removed connection"; + if (connections.isEmpty()) { + openConnections.remove(node); + } + } + })); + + return connection; } @Override @@ -851,4 +624,5 @@ protected void doClose() throws IOException { public DiscoveryNode getLocalDiscoNode() { return this.getLocalNode(); } + } diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java new file mode 100644 index 0000000000000..486ccc805d055 --- /dev/null +++ 
b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java @@ -0,0 +1,141 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.test.transport; + +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.ConnectionManager; +import org.elasticsearch.transport.ConnectionProfile; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportConnectionListener; + +import java.io.IOException; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +public class StubbableConnectionManager extends ConnectionManager { + + private final ConnectionManager delegate; + private final ConcurrentMap getConnectionBehaviors; + private final ConcurrentMap nodeConnectedBehaviors; + private volatile GetConnectionBehavior defaultGetConnectionBehavior = ConnectionManager::getConnection; + private volatile NodeConnectedBehavior defaultNodeConnectedBehavior = ConnectionManager::nodeConnected; + + public StubbableConnectionManager(ConnectionManager delegate, Settings settings, Transport transport, ThreadPool threadPool) { + super(settings, transport, threadPool); + this.delegate = delegate; + this.getConnectionBehaviors = new ConcurrentHashMap<>(); + this.nodeConnectedBehaviors = new ConcurrentHashMap<>(); + } + + public boolean addConnectBehavior(TransportAddress transportAddress, GetConnectionBehavior connectBehavior) { + return getConnectionBehaviors.put(transportAddress, connectBehavior) == null; + } + + public boolean setDefaultConnectBehavior(GetConnectionBehavior behavior) { + GetConnectionBehavior prior = defaultGetConnectionBehavior; + defaultGetConnectionBehavior = behavior; + return prior == null; + } + + public boolean addNodeConnectedBehavior(TransportAddress transportAddress, NodeConnectedBehavior behavior) { + return nodeConnectedBehaviors.put(transportAddress, behavior) == null; + } + + public boolean setDefaultNodeConnectedBehavior(NodeConnectedBehavior behavior) { + NodeConnectedBehavior prior = defaultNodeConnectedBehavior; + defaultNodeConnectedBehavior = behavior; + return prior == null; + } + + public void clearBehaviors() { + getConnectionBehaviors.clear(); + nodeConnectedBehaviors.clear(); + } + + public void clearBehavior(TransportAddress transportAddress) { + getConnectionBehaviors.remove(transportAddress); + nodeConnectedBehaviors.remove(transportAddress); + } + + @Override + public Transport.Connection 
openConnection(DiscoveryNode node, ConnectionProfile connectionProfile) { + return delegate.openConnection(node, connectionProfile); + } + + @Override + public Transport.Connection getConnection(DiscoveryNode node) { + TransportAddress address = node.getAddress(); + GetConnectionBehavior behavior = getConnectionBehaviors.getOrDefault(address, defaultGetConnectionBehavior); + return behavior.getConnection(delegate, node); + } + + @Override + public boolean nodeConnected(DiscoveryNode node) { + TransportAddress address = node.getAddress(); + NodeConnectedBehavior behavior = nodeConnectedBehaviors.getOrDefault(address, defaultNodeConnectedBehavior); + return behavior.nodeConnected(delegate, node); + } + + @Override + public void addListener(TransportConnectionListener listener) { + delegate.addListener(listener); + } + + @Override + public void removeListener(TransportConnectionListener listener) { + delegate.removeListener(listener); + } + + @Override + public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfile, + CheckedBiConsumer connectionValidator) + throws ConnectTransportException { + delegate.connectToNode(node, connectionProfile, connectionValidator); + } + + @Override + public void disconnectFromNode(DiscoveryNode node) { + delegate.disconnectFromNode(node); + } + + @Override + public int connectedNodeCount() { + return delegate.connectedNodeCount(); + } + + @Override + public void close() { + delegate.close(); + } + + @FunctionalInterface + public interface GetConnectionBehavior { + Transport.Connection getConnection(ConnectionManager connectionManager, DiscoveryNode discoveryNode); + } + + @FunctionalInterface + public interface NodeConnectedBehavior { + boolean nodeConnected(ConnectionManager connectionManager, DiscoveryNode discoveryNode); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java new file mode 100644 index 0000000000000..5a0dd3b7f6d53 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java @@ -0,0 +1,252 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.test.transport; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.component.Lifecycle; +import org.elasticsearch.common.component.LifecycleListener; +import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.transport.ConnectionProfile; +import org.elasticsearch.transport.RequestHandlerRegistry; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportConnectionListener; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportStats; + +import java.io.IOException; +import java.net.UnknownHostException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +public class StubbableTransport implements Transport { + + private final ConcurrentHashMap sendBehaviors = new ConcurrentHashMap<>(); + private final ConcurrentHashMap connectBehaviors = new ConcurrentHashMap<>(); + private volatile SendRequestBehavior defaultSendRequest = null; + private volatile OpenConnectionBehavior defaultConnectBehavior = null; + private final Transport delegate; + + + public StubbableTransport(Transport transport) { + this.delegate = transport; + } + + boolean setDefaultSendBehavior(SendRequestBehavior sendBehavior) { + SendRequestBehavior prior = defaultSendRequest; + defaultSendRequest = sendBehavior; + return prior == null; + } + + boolean addSendBehavior(TransportAddress transportAddress, SendRequestBehavior sendBehavior) { + return sendBehaviors.put(transportAddress, sendBehavior) == null; + } + + boolean addConnectBehavior(TransportAddress transportAddress, OpenConnectionBehavior connectBehavior) { + return connectBehaviors.put(transportAddress, connectBehavior) == null; + } + + void clearBehaviors() { + sendBehaviors.clear(); + connectBehaviors.clear(); + } + + void clearBehavior(TransportAddress transportAddress) { + SendRequestBehavior behavior = sendBehaviors.remove(transportAddress); + if (behavior != null) { + behavior.clearCallback(); + } + connectBehaviors.remove(transportAddress); + } + + Transport getDelegate() { + return delegate; + } + + @Override + public void addConnectionListener(TransportConnectionListener listener) { + delegate.addConnectionListener(listener); + } + + @Override + public boolean removeConnectionListener(TransportConnectionListener listener) { + return delegate.removeConnectionListener(listener); + } + + @Override + public void registerRequestHandler(RequestHandlerRegistry reg) { + delegate.registerRequestHandler(reg); + } + + @Override + public RequestHandlerRegistry getRequestHandler(String action) { + return delegate.getRequestHandler(action); + } + + @Override + public BoundTransportAddress boundAddress() { + return delegate.boundAddress(); + } + + @Override + public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException { + return delegate.addressesFromString(address, perAddressLimit); + } + + @Override + public List getLocalAddresses() { + return delegate.getLocalAddresses(); + } + + @Override + public Connection openConnection(DiscoveryNode node, ConnectionProfile profile) { + TransportAddress address = node.getAddress(); + 
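// a behavior registered for this exact address takes precedence; otherwise the
+        // default applies, and with neither set the delegate opens the connection as-is
+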
OpenConnectionBehavior behavior = connectBehaviors.getOrDefault(address, defaultConnectBehavior); + Connection connection; + if (behavior == null) { + connection = delegate.openConnection(node, profile); + } else { + connection = behavior.openConnection(delegate, node, profile); + } + + return new WrappedConnection(connection); + } + + @Override + public TransportStats getStats() { + return delegate.getStats(); + } + + @Override + public Transport.ResponseHandlers getResponseHandlers() { + return delegate.getResponseHandlers(); + } + + @Override + public Lifecycle.State lifecycleState() { + return delegate.lifecycleState(); + } + + @Override + public void addLifecycleListener(LifecycleListener listener) { + delegate.addLifecycleListener(listener); + } + + @Override + public void removeLifecycleListener(LifecycleListener listener) { + delegate.removeLifecycleListener(listener); + } + + @Override + public void start() { + delegate.start(); + } + + @Override + public void stop() { + delegate.stop(); + } + + @Override + public void close() { + delegate.close(); + } + + @Override + public Map profileBoundAddresses() { + return delegate.profileBoundAddresses(); + } + + private class WrappedConnection implements Transport.Connection { + + private final Transport.Connection connection; + + private WrappedConnection(Transport.Connection connection) { + this.connection = connection; + } + + @Override + public DiscoveryNode getNode() { + return connection.getNode(); + } + + @Override + public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) + throws IOException, TransportException { + TransportAddress address = connection.getNode().getAddress(); + SendRequestBehavior behavior = sendBehaviors.getOrDefault(address, defaultSendRequest); + if (behavior == null) { + connection.sendRequest(requestId, action, request, options); + } else { + behavior.sendRequest(connection, requestId, action, request, options); + } + } + + @Override + public boolean sendPing() { + return connection.sendPing(); + } + + @Override + public void addCloseListener(ActionListener listener) { + connection.addCloseListener(listener); + } + + + @Override + public boolean isClosed() { + return connection.isClosed(); + } + + @Override + public Version getVersion() { + return connection.getVersion(); + } + + @Override + public Object getCacheKey() { + return connection.getCacheKey(); + } + + @Override + public void close() { + connection.close(); + } + } + + @FunctionalInterface + public interface OpenConnectionBehavior { + Connection openConnection(Transport transport, DiscoveryNode discoveryNode, ConnectionProfile profile); + } + + @FunctionalInterface + public interface SendRequestBehavior { + void sendRequest(Connection connection, long requestId, String action, TransportRequest request, + TransportRequestOptions options) throws IOException; + + default void clearCallback() { + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 85c4d478e1887..1d0c6ea03e388 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -2007,11 +2007,11 @@ protected String handleRequest(TcpChannel mockChannel, String profileName, Strea assertEquals("handshake failed", 
exception.getCause().getMessage()); } + ConnectionProfile connectionProfile = ConnectionManager.buildDefaultConnectionProfile(Settings.EMPTY); try (TransportService service = buildService("TS_TPC", Version.CURRENT, null); TcpTransport.NodeChannels connection = originalTransport.openConnection( new DiscoveryNode("TS_TPC", "TS_TPC", service.boundAddress().publishAddress(), emptyMap(), emptySet(), version0), - null - )) { + connectionProfile)) { Version version = originalTransport.executeHandshake(connection.getNode(), connection.channel(TransportRequestOptions.Type.PING), TimeValue.timeValueSeconds(10)); assertEquals(version, Version.CURRENT); diff --git a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java index 71028a9883cb7..4be7e5668714c 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java @@ -198,12 +198,11 @@ protected MockChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeo } @Override - protected ConnectionProfile resolveConnectionProfile(ConnectionProfile connectionProfile) { - ConnectionProfile connectionProfile1 = resolveConnectionProfile(connectionProfile, defaultConnectionProfile); + protected ConnectionProfile maybeOverrideConnectionProfile(ConnectionProfile connectionProfile) { ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); Set allTypesWithConnection = new HashSet<>(); Set allTypesWithoutConnection = new HashSet<>(); - for (ConnectionProfile.ConnectionTypeHandle handle : connectionProfile1.getHandles()) { + for (ConnectionProfile.ConnectionTypeHandle handle : connectionProfile.getHandles()) { Set types = handle.getTypes(); if (handle.length > 0) { allTypesWithConnection.addAll(types); @@ -216,8 +215,8 @@ protected ConnectionProfile resolveConnectionProfile(ConnectionProfile connectio if (allTypesWithoutConnection.isEmpty() == false) { builder.addConnections(0, allTypesWithoutConnection.toArray(new TransportRequestOptions.Type[0])); } - builder.setHandshakeTimeout(connectionProfile1.getHandshakeTimeout()); - builder.setConnectTimeout(connectionProfile1.getConnectTimeout()); + builder.setHandshakeTimeout(connectionProfile.getHandshakeTimeout()); + builder.setConnectTimeout(connectionProfile.getConnectTimeout()); return builder.build(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RemoteClusterLicenseChecker.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RemoteClusterLicenseChecker.java new file mode 100644 index 0000000000000..043224e357b91 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RemoteClusterLicenseChecker.java @@ -0,0 +1,281 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.license; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ContextPreservingActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.protocol.xpack.XPackInfoRequest; +import org.elasticsearch.protocol.xpack.XPackInfoResponse; +import org.elasticsearch.protocol.xpack.license.LicenseStatus; +import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.xpack.core.action.XPackInfoAction; + +import java.util.EnumSet; +import java.util.Iterator; +import java.util.List; +import java.util.Locale; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +/** + * Checks remote clusters for license compatibility with a specified license predicate. + */ +public final class RemoteClusterLicenseChecker { + + /** + * Encapsulates the license info of a remote cluster. + */ + public static final class RemoteClusterLicenseInfo { + + private final String clusterAlias; + + /** + * The alias of the remote cluster. + * + * @return the cluster alias + */ + public String clusterAlias() { + return clusterAlias; + } + + private final XPackInfoResponse.LicenseInfo licenseInfo; + + /** + * The license info of the remote cluster. + * + * @return the license info + */ + public XPackInfoResponse.LicenseInfo licenseInfo() { + return licenseInfo; + } + + RemoteClusterLicenseInfo(final String clusterAlias, final XPackInfoResponse.LicenseInfo licenseInfo) { + this.clusterAlias = clusterAlias; + this.licenseInfo = licenseInfo; + } + + } + + /** + * Encapsulates a remote cluster license check. The check is either successful if the license of the remote cluster is compatible with + * the predicate used to check license compatibility, or the check is a failure. + */ + public static final class LicenseCheck { + + private final RemoteClusterLicenseInfo remoteClusterLicenseInfo; + + /** + * The remote cluster license info. This method should only be invoked if this instance represents a failing license check. + * + * @return the remote cluster license info + */ + public RemoteClusterLicenseInfo remoteClusterLicenseInfo() { + assert isSuccess() == false; + return remoteClusterLicenseInfo; + } + + private static final LicenseCheck SUCCESS = new LicenseCheck(null); + + /** + * A successful license check. + * + * @return a successful license check instance + */ + public static LicenseCheck success() { + return SUCCESS; + } + + /** + * Test if this instance represents a successful license check. + * + * @return true if this instance represents a successful license check, otherwise false + */ + public boolean isSuccess() { + return this == SUCCESS; + } + + /** + * Creates a failing license check encapsulating the specified remote cluster license info. 
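+     * <p>
+     * Callers typically branch on the result, along these lines (sketch; {@code handleFailure}
+     * is a hypothetical consumer of the failing cluster's license info):
+     * <pre>{@code
+     * if (check.isSuccess() == false) {
+     *     handleFailure(check.remoteClusterLicenseInfo());
+     * }
+     * }</pre>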
+ * + * @param remoteClusterLicenseInfo the remote cluster license info + * @return a failing license check + */ + public static LicenseCheck failure(final RemoteClusterLicenseInfo remoteClusterLicenseInfo) { + return new LicenseCheck(remoteClusterLicenseInfo); + } + + private LicenseCheck(final RemoteClusterLicenseInfo remoteClusterLicenseInfo) { + this.remoteClusterLicenseInfo = remoteClusterLicenseInfo; + } + + } + + private final Client client; + private final Predicate predicate; + + /** + * Constructs a remote cluster license checker with the specified license predicate for checking license compatibility. The predicate + * does not need to check for the active license state as this is handled by the remote cluster license checker. + * + * @param client the client + * @param predicate the license predicate + */ + public RemoteClusterLicenseChecker(final Client client, final Predicate predicate) { + this.client = client; + this.predicate = predicate; + } + + public static boolean isLicensePlatinumOrTrial(final XPackInfoResponse.LicenseInfo licenseInfo) { + final License.OperationMode mode = License.OperationMode.resolve(licenseInfo.getMode()); + return mode == License.OperationMode.PLATINUM || mode == License.OperationMode.TRIAL; + } + + /** + * Checks the specified clusters for license compatibility. The specified callback will be invoked once if all clusters are + * license-compatible, otherwise the specified callback will be invoked once on the first cluster that is not license-compatible. + * + * @param clusterAliases the cluster aliases to check + * @param listener a callback + */ + public void checkRemoteClusterLicenses(final List clusterAliases, final ActionListener listener) { + final Iterator clusterAliasesIterator = clusterAliases.iterator(); + if (clusterAliasesIterator.hasNext() == false) { + listener.onResponse(LicenseCheck.success()); + return; + } + + final AtomicReference clusterAlias = new AtomicReference<>(); + + final ActionListener infoListener = new ActionListener() { + + @Override + public void onResponse(final XPackInfoResponse xPackInfoResponse) { + final XPackInfoResponse.LicenseInfo licenseInfo = xPackInfoResponse.getLicenseInfo(); + if ((licenseInfo.getStatus() == LicenseStatus.ACTIVE) == false || predicate.test(licenseInfo) == false) { + listener.onResponse(LicenseCheck.failure(new RemoteClusterLicenseInfo(clusterAlias.get(), licenseInfo))); + return; + } + + if (clusterAliasesIterator.hasNext()) { + clusterAlias.set(clusterAliasesIterator.next()); + // recurse to the next cluster + remoteClusterLicense(clusterAlias.get(), this); + } else { + listener.onResponse(LicenseCheck.success()); + } + } + + @Override + public void onFailure(final Exception e) { + final String message = "could not determine the license type for cluster [" + clusterAlias.get() + "]"; + listener.onFailure(new ElasticsearchException(message, e)); + } + + }; + + // check the license on the first cluster, and then we recursively check licenses on the remaining clusters + clusterAlias.set(clusterAliasesIterator.next()); + remoteClusterLicense(clusterAlias.get(), infoListener); + } + + private void remoteClusterLicense(final String clusterAlias, final ActionListener listener) { + final ThreadContext threadContext = client.threadPool().getThreadContext(); + final ContextPreservingActionListener contextPreservingActionListener = + new ContextPreservingActionListener<>(threadContext.newRestorableContext(false), listener); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) 
{ + // we stash any context here since this is an internal execution and should not leak any existing context information + threadContext.markAsSystemContext(); + + final XPackInfoRequest request = new XPackInfoRequest(); + request.setCategories(EnumSet.of(XPackInfoRequest.Category.LICENSE)); + try { + client.getRemoteClusterClient(clusterAlias).execute(XPackInfoAction.INSTANCE, request, contextPreservingActionListener); + } catch (final Exception e) { + contextPreservingActionListener.onFailure(e); + } + } + } + + /** + * Predicate to test if the index name represents the name of a remote index. + * + * @param index the index name + * @return true if the collection of indices contains a remote index, otherwise false + */ + public static boolean isRemoteIndex(final String index) { + return index.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR) != -1; + } + + /** + * Predicate to test if the collection of index names contains any that represent the name of a remote index. + * + * @param indices the collection of index names + * @return true if the collection of index names contains a name that represents a remote index, otherwise false + */ + public static boolean containsRemoteIndex(final List indices) { + return indices.stream().anyMatch(RemoteClusterLicenseChecker::isRemoteIndex); + } + + /** + * Filters the collection of index names for names that represent a remote index. Remote index names are of the form + * {@code cluster_name:index_name}. + * + * @param indices the collection of index names + * @return list of index names that represent remote index names + */ + public static List remoteIndices(final List indices) { + return indices.stream().filter(RemoteClusterLicenseChecker::isRemoteIndex).collect(Collectors.toList()); + } + + /** + * Extract the list of remote cluster aliases from the list of index names. Remote index names are of the form + * {@code cluster_alias:index_name} and the cluster_alias is extracted for each index name that represents a remote index. + * + * @param indices the collection of index names + * @return the remote cluster names + */ + public static List remoteClusterAliases(final List indices) { + return indices.stream() + .filter(RemoteClusterLicenseChecker::isRemoteIndex) + .map(index -> index.substring(0, index.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR))) + .distinct() + .collect(Collectors.toList()); + } + + /** + * Constructs an error message for license incompatibility. + * + * @param feature the name of the feature that initiated the remote cluster license check. 
+ * @param remoteClusterLicenseInfo the remote cluster license info of the cluster that failed the license check + * @return an error message representing license incompatibility + */ + public static String buildErrorMessage( + final String feature, + final RemoteClusterLicenseInfo remoteClusterLicenseInfo, + final Predicate predicate) { + final StringBuilder error = new StringBuilder(); + if (remoteClusterLicenseInfo.licenseInfo().getStatus() != LicenseStatus.ACTIVE) { + error.append(String.format(Locale.ROOT, "the license on cluster [%s] is not active", remoteClusterLicenseInfo.clusterAlias())); + } else { + assert predicate.test(remoteClusterLicenseInfo.licenseInfo()) == false : "license must be incompatible to build error message"; + final String message = String.format( + Locale.ROOT, + "the license mode [%s] on cluster [%s] does not enable [%s]", + License.OperationMode.resolve(remoteClusterLicenseInfo.licenseInfo().getMode()), + remoteClusterLicenseInfo.clusterAlias(), + feature); + error.append(message); + } + + return error.toString(); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/RemoteClusterLicenseCheckerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/RemoteClusterLicenseCheckerTests.java new file mode 100644 index 0000000000000..a8627d2154209 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/RemoteClusterLicenseCheckerTests.java @@ -0,0 +1,414 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.license; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.protocol.xpack.XPackInfoResponse; +import org.elasticsearch.protocol.xpack.license.LicenseStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.action.XPackInfoAction; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Matchers.argThat; +import static org.mockito.Matchers.same; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public final class RemoteClusterLicenseCheckerTests extends ESTestCase { + + public 
void testIsNotRemoteIndex() { + assertFalse(RemoteClusterLicenseChecker.isRemoteIndex("local-index")); + } + + public void testIsRemoteIndex() { + assertTrue(RemoteClusterLicenseChecker.isRemoteIndex("remote-cluster:remote-index")); + } + + public void testNoRemoteIndex() { + final List indices = Arrays.asList("local-index1", "local-index2"); + assertFalse(RemoteClusterLicenseChecker.containsRemoteIndex(indices)); + } + + public void testRemoteIndex() { + final List indices = Arrays.asList("local-index", "remote-cluster:remote-index"); + assertTrue(RemoteClusterLicenseChecker.containsRemoteIndex(indices)); + } + + public void testNoRemoteIndices() { + final List indices = Collections.singletonList("local-index"); + assertThat(RemoteClusterLicenseChecker.remoteIndices(indices), is(empty())); + } + + public void testRemoteIndices() { + final List indices = Arrays.asList("local-index1", "remote-cluster1:index1", "local-index2", "remote-cluster2:index1"); + assertThat( + RemoteClusterLicenseChecker.remoteIndices(indices), + containsInAnyOrder("remote-cluster1:index1", "remote-cluster2:index1")); + } + + public void testNoRemoteClusterAliases() { + final List indices = Arrays.asList("local-index1", "local-index2"); + assertThat(RemoteClusterLicenseChecker.remoteClusterAliases(indices), empty()); + } + + public void testOneRemoteClusterAlias() { + final List indices = Arrays.asList("local-index1", "remote-cluster1:remote-index1"); + assertThat(RemoteClusterLicenseChecker.remoteClusterAliases(indices), contains("remote-cluster1")); + } + + public void testMoreThanOneRemoteClusterAlias() { + final List indices = Arrays.asList("remote-cluster1:remote-index1", "local-index1", "remote-cluster2:remote-index1"); + assertThat(RemoteClusterLicenseChecker.remoteClusterAliases(indices), contains("remote-cluster1", "remote-cluster2")); + } + + public void testDuplicateRemoteClusterAlias() { + final List indices = Arrays.asList( + "remote-cluster1:remote-index1", "local-index1", "remote-cluster2:index1", "remote-cluster2:remote-index2"); + assertThat(RemoteClusterLicenseChecker.remoteClusterAliases(indices), contains("remote-cluster1", "remote-cluster2")); + } + + public void testCheckRemoteClusterLicensesGivenCompatibleLicenses() { + final AtomicInteger index = new AtomicInteger(); + final List responses = new ArrayList<>(); + + final ThreadPool threadPool = createMockThreadPool(); + final Client client = createMockClient(threadPool); + doAnswer(invocationMock -> { + @SuppressWarnings("unchecked") ActionListener listener = + (ActionListener) invocationMock.getArguments()[2]; + listener.onResponse(responses.get(index.getAndIncrement())); + return null; + }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); + + final List remoteClusterAliases = Arrays.asList("valid1", "valid2", "valid3"); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + + final RemoteClusterLicenseChecker licenseChecker = + new RemoteClusterLicenseChecker(client, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial); + final AtomicReference licenseCheck = new AtomicReference<>(); + + licenseChecker.checkRemoteClusterLicenses( + remoteClusterAliases, + doubleInvocationProtectingListener(new ActionListener() { + + @Override + public void onResponse(final RemoteClusterLicenseChecker.LicenseCheck response) { + 
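// record the check; the mocked client invokes this listener inline, so the
+                        // result is visible immediately after checkRemoteClusterLicenses returns
+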
licenseCheck.set(response); + } + + @Override + public void onFailure(final Exception e) { + fail(e.getMessage()); + } + + })); + + verify(client, times(3)).execute(same(XPackInfoAction.INSTANCE), any(), any()); + assertNotNull(licenseCheck.get()); + assertTrue(licenseCheck.get().isSuccess()); + } + + public void testCheckRemoteClusterLicensesGivenIncompatibleLicense() { + final AtomicInteger index = new AtomicInteger(); + final List remoteClusterAliases = Arrays.asList("good", "cluster-with-basic-license", "good2"); + final List responses = new ArrayList<>(); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createBasicLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + + final ThreadPool threadPool = createMockThreadPool(); + final Client client = createMockClient(threadPool); + doAnswer(invocationMock -> { + @SuppressWarnings("unchecked") ActionListener listener = + (ActionListener) invocationMock.getArguments()[2]; + listener.onResponse(responses.get(index.getAndIncrement())); + return null; + }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); + + final RemoteClusterLicenseChecker licenseChecker = + new RemoteClusterLicenseChecker(client, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial); + final AtomicReference licenseCheck = new AtomicReference<>(); + + licenseChecker.checkRemoteClusterLicenses( + remoteClusterAliases, + doubleInvocationProtectingListener(new ActionListener() { + + @Override + public void onResponse(final RemoteClusterLicenseChecker.LicenseCheck response) { + licenseCheck.set(response); + } + + @Override + public void onFailure(final Exception e) { + fail(e.getMessage()); + } + + })); + + verify(client, times(2)).execute(same(XPackInfoAction.INSTANCE), any(), any()); + assertNotNull(licenseCheck.get()); + assertFalse(licenseCheck.get().isSuccess()); + assertThat(licenseCheck.get().remoteClusterLicenseInfo().clusterAlias(), equalTo("cluster-with-basic-license")); + assertThat(licenseCheck.get().remoteClusterLicenseInfo().licenseInfo().getType(), equalTo("BASIC")); + } + + public void testCheckRemoteClusterLicencesGivenNonExistentCluster() { + final AtomicInteger index = new AtomicInteger(); + final List responses = new ArrayList<>(); + + final List remoteClusterAliases = Arrays.asList("valid1", "valid2", "valid3"); + final String failingClusterAlias = randomFrom(remoteClusterAliases); + final ThreadPool threadPool = createMockThreadPool(); + final Client client = createMockClientThatThrowsOnGetRemoteClusterClient(threadPool, failingClusterAlias); + doAnswer(invocationMock -> { + @SuppressWarnings("unchecked") ActionListener listener = + (ActionListener) invocationMock.getArguments()[2]; + listener.onResponse(responses.get(index.getAndIncrement())); + return null; + }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); + + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + + final RemoteClusterLicenseChecker licenseChecker = + new RemoteClusterLicenseChecker(client, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial); + final AtomicReference exception = new AtomicReference<>(); + + licenseChecker.checkRemoteClusterLicenses( + remoteClusterAliases, + 
doubleInvocationProtectingListener(new ActionListener() { + + @Override + public void onResponse(final RemoteClusterLicenseChecker.LicenseCheck response) { + fail(); + } + + @Override + public void onFailure(final Exception e) { + exception.set(e); + } + + })); + + assertNotNull(exception.get()); + assertThat(exception.get(), instanceOf(ElasticsearchException.class)); + assertThat(exception.get().getMessage(), equalTo("could not determine the license type for cluster [" + failingClusterAlias + "]")); + assertNotNull(exception.get().getCause()); + assertThat(exception.get().getCause(), instanceOf(IllegalArgumentException.class)); + } + + public void testRemoteClusterLicenseCallUsesSystemContext() throws InterruptedException { + final ThreadPool threadPool = new TestThreadPool(getTestName()); + + try { + final Client client = createMockClient(threadPool); + doAnswer(invocationMock -> { + assertTrue(threadPool.getThreadContext().isSystemContext()); + @SuppressWarnings("unchecked") ActionListener listener = + (ActionListener) invocationMock.getArguments()[2]; + listener.onResponse(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + return null; + }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); + + final RemoteClusterLicenseChecker licenseChecker = + new RemoteClusterLicenseChecker(client, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial); + + final List remoteClusterAliases = Collections.singletonList("valid"); + licenseChecker.checkRemoteClusterLicenses( + remoteClusterAliases, doubleInvocationProtectingListener(ActionListener.wrap(() -> {}))); + + verify(client, times(1)).execute(same(XPackInfoAction.INSTANCE), any(), any()); + } finally { + terminate(threadPool); + } + } + + public void testListenerIsExecutedWithCallingContext() throws InterruptedException { + final AtomicInteger index = new AtomicInteger(); + final List responses = new ArrayList<>(); + + final ThreadPool threadPool = new TestThreadPool(getTestName()); + + try { + final List remoteClusterAliases = Arrays.asList("valid1", "valid2", "valid3"); + final Client client; + final boolean failure = randomBoolean(); + if (failure) { + client = createMockClientThatThrowsOnGetRemoteClusterClient(threadPool, randomFrom(remoteClusterAliases)); + } else { + client = createMockClient(threadPool); + } + doAnswer(invocationMock -> { + @SuppressWarnings("unchecked") ActionListener listener = + (ActionListener) invocationMock.getArguments()[2]; + listener.onResponse(responses.get(index.getAndIncrement())); + return null; + }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); + + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); + + final RemoteClusterLicenseChecker licenseChecker = + new RemoteClusterLicenseChecker(client, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial); + + final AtomicBoolean listenerInvoked = new AtomicBoolean(); + threadPool.getThreadContext().putHeader("key", "value"); + licenseChecker.checkRemoteClusterLicenses( + remoteClusterAliases, + doubleInvocationProtectingListener(new ActionListener() { + + @Override + public void onResponse(final RemoteClusterLicenseChecker.LicenseCheck response) { + if (failure) { + fail(); + } + assertThat(threadPool.getThreadContext().getHeader("key"), equalTo("value")); + 
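// the listener must observe the restored calling context, not the stashed system context
+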
assertFalse(threadPool.getThreadContext().isSystemContext()); + listenerInvoked.set(true); + } + + @Override + public void onFailure(final Exception e) { + if (failure == false) { + fail(); + } + assertThat(threadPool.getThreadContext().getHeader("key"), equalTo("value")); + assertFalse(threadPool.getThreadContext().isSystemContext()); + listenerInvoked.set(true); + } + + })); + + assertTrue(listenerInvoked.get()); + } finally { + terminate(threadPool); + } + } + + public void testBuildErrorMessageForActiveCompatibleLicense() { + final XPackInfoResponse.LicenseInfo platinumLicence = createPlatinumLicenseResponse(); + final RemoteClusterLicenseChecker.RemoteClusterLicenseInfo info = + new RemoteClusterLicenseChecker.RemoteClusterLicenseInfo("platinum-cluster", platinumLicence); + final AssertionError e = expectThrows( + AssertionError.class, + () -> RemoteClusterLicenseChecker.buildErrorMessage("", info, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial)); + assertThat(e, hasToString(containsString("license must be incompatible to build error message"))); + } + + public void testBuildErrorMessageForIncompatibleLicense() { + final XPackInfoResponse.LicenseInfo basicLicense = createBasicLicenseResponse(); + final RemoteClusterLicenseChecker.RemoteClusterLicenseInfo info = + new RemoteClusterLicenseChecker.RemoteClusterLicenseInfo("basic-cluster", basicLicense); + assertThat( + RemoteClusterLicenseChecker.buildErrorMessage("Feature", info, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial), + equalTo("the license mode [BASIC] on cluster [basic-cluster] does not enable [Feature]")); + } + + public void testBuildErrorMessageForInactiveLicense() { + final XPackInfoResponse.LicenseInfo expiredLicense = createExpiredLicenseResponse(); + final RemoteClusterLicenseChecker.RemoteClusterLicenseInfo info = + new RemoteClusterLicenseChecker.RemoteClusterLicenseInfo("expired-cluster", expiredLicense); + assertThat( + RemoteClusterLicenseChecker.buildErrorMessage("Feature", info, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial), + equalTo("the license on cluster [expired-cluster] is not active")); + } + + private ActionListener doubleInvocationProtectingListener( + final ActionListener listener) { + final AtomicBoolean listenerInvoked = new AtomicBoolean(); + return new ActionListener() { + + @Override + public void onResponse(final RemoteClusterLicenseChecker.LicenseCheck response) { + if (listenerInvoked.compareAndSet(false, true) == false) { + fail("listener invoked twice"); + } + listener.onResponse(response); + } + + @Override + public void onFailure(final Exception e) { + if (listenerInvoked.compareAndSet(false, true) == false) { + fail("listener invoked twice"); + } + listener.onFailure(e); + } + + }; + } + + private ThreadPool createMockThreadPool() { + final ThreadPool threadPool = mock(ThreadPool.class); + when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + return threadPool; + } + + private Client createMockClient(final ThreadPool threadPool) { + return createMockClient(threadPool, client -> when(client.getRemoteClusterClient(anyString())).thenReturn(client)); + } + + private Client createMockClientThatThrowsOnGetRemoteClusterClient(final ThreadPool threadPool, final String clusterAlias) { + return createMockClient( + threadPool, + client -> { + when(client.getRemoteClusterClient(clusterAlias)).thenThrow(new IllegalArgumentException()); + when(client.getRemoteClusterClient(argThat(not(clusterAlias)))).thenReturn(client); + }); + } + + private Client 
createMockClient(final ThreadPool threadPool, final Consumer finish) { + final Client client = mock(Client.class); + when(client.threadPool()).thenReturn(threadPool); + finish.accept(client); + return client; + } + + private XPackInfoResponse.LicenseInfo createPlatinumLicenseResponse() { + return new XPackInfoResponse.LicenseInfo("uid", "PLATINUM", "PLATINUM", LicenseStatus.ACTIVE, randomNonNegativeLong()); + } + + private XPackInfoResponse.LicenseInfo createBasicLicenseResponse() { + return new XPackInfoResponse.LicenseInfo("uid", "BASIC", "BASIC", LicenseStatus.ACTIVE, randomNonNegativeLong()); + } + + private XPackInfoResponse.LicenseInfo createExpiredLicenseResponse() { + return new XPackInfoResponse.LicenseInfo("uid", "PLATINUM", "PLATINUM", LicenseStatus.EXPIRED, randomNonNegativeLong()); + } + +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java index 0ea9eb7764803..d6ebdd0449e98 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.license.RemoteClusterLicenseChecker; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTaskState; @@ -46,10 +47,10 @@ import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.datafeed.DatafeedManager; import org.elasticsearch.xpack.ml.datafeed.DatafeedNodeSelector; -import org.elasticsearch.xpack.ml.datafeed.MlRemoteLicenseChecker; import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.function.Predicate; @@ -141,19 +142,22 @@ public void onFailure(Exception e) { DatafeedConfig datafeed = mlMetadata.getDatafeed(params.getDatafeedId()); Job job = mlMetadata.getJobs().get(datafeed.getJobId()); - if (MlRemoteLicenseChecker.containsRemoteIndex(datafeed.getIndices())) { - MlRemoteLicenseChecker remoteLicenseChecker = new MlRemoteLicenseChecker(client); - remoteLicenseChecker.checkRemoteClusterLicenses(MlRemoteLicenseChecker.remoteClusterNames(datafeed.getIndices()), + if (RemoteClusterLicenseChecker.containsRemoteIndex(datafeed.getIndices())) { + final RemoteClusterLicenseChecker remoteClusterLicenseChecker = + new RemoteClusterLicenseChecker(client, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial); + remoteClusterLicenseChecker.checkRemoteClusterLicenses( + RemoteClusterLicenseChecker.remoteClusterAliases(datafeed.getIndices()), ActionListener.wrap( response -> { - if (response.isViolated()) { + if (response.isSuccess() == false) { listener.onFailure(createUnlicensedError(datafeed.getId(), response)); } else { createDataExtractor(job, datafeed, params, waitForTaskListener); } }, - e -> listener.onFailure(createUnknownLicenseError(datafeed.getId(), - MlRemoteLicenseChecker.remoteIndices(datafeed.getIndices()), e)) + e -> listener.onFailure( + createUnknownLicenseError( + datafeed.getId(), RemoteClusterLicenseChecker.remoteIndices(datafeed.getIndices()), e)) )); } else { createDataExtractor(job, datafeed, 
params, waitForTaskListener); @@ -232,23 +236,35 @@ public void onFailure(Exception e) { ); } - private ElasticsearchStatusException createUnlicensedError(String datafeedId, - MlRemoteLicenseChecker.LicenseViolation licenseViolation) { - String message = "Cannot start datafeed [" + datafeedId + "] as it is configured to use " - + "indices on a remote cluster [" + licenseViolation.get().getClusterName() - + "] that is not licensed for Machine Learning. " - + MlRemoteLicenseChecker.buildErrorMessage(licenseViolation.get()); - + private ElasticsearchStatusException createUnlicensedError( + final String datafeedId, final RemoteClusterLicenseChecker.LicenseCheck licenseCheck) { + final String message = String.format( + Locale.ROOT, + "cannot start datafeed [%s] as it is configured to use indices on remote cluster [%s] that is not licensed for ml; %s", + datafeedId, + licenseCheck.remoteClusterLicenseInfo().clusterAlias(), + RemoteClusterLicenseChecker.buildErrorMessage( + "ml", + licenseCheck.remoteClusterLicenseInfo(), + RemoteClusterLicenseChecker::isLicensePlatinumOrTrial)); return new ElasticsearchStatusException(message, RestStatus.BAD_REQUEST); } - private ElasticsearchStatusException createUnknownLicenseError(String datafeedId, List remoteIndices, - Exception cause) { - String message = "Cannot start datafeed [" + datafeedId + "] as it is configured to use" - + " indices on a remote cluster " + remoteIndices - + " but the license type could not be verified"; - - return new ElasticsearchStatusException(message, RestStatus.BAD_REQUEST, new Exception(cause.getMessage())); + private ElasticsearchStatusException createUnknownLicenseError( + final String datafeedId, final List remoteIndices, final Exception cause) { + final int numberOfRemoteClusters = RemoteClusterLicenseChecker.remoteClusterAliases(remoteIndices).size(); + assert numberOfRemoteClusters > 0; + final String remoteClusterQualifier = numberOfRemoteClusters == 1 ? "a remote cluster" : "remote clusters"; + final String licenseTypeQualifier = numberOfRemoteClusters == 1 ? 
"" : "s"; + final String message = String.format( + Locale.ROOT, + "cannot start datafeed [%s] as it uses indices on %s %s but the license type%s could not be verified", + datafeedId, + remoteClusterQualifier, + remoteIndices, + licenseTypeQualifier); + + return new ElasticsearchStatusException(message, RestStatus.BAD_REQUEST, cause); } public static class StartDatafeedPersistentTasksExecutor extends PersistentTasksExecutor { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java index a6be047648623..ce3f611b2227a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java @@ -12,6 +12,7 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.license.RemoteClusterLicenseChecker; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; @@ -92,7 +93,7 @@ private AssignmentFailure verifyIndicesActive(DatafeedConfig datafeed) { List indices = datafeed.getIndices(); for (String index : indices) { - if (MlRemoteLicenseChecker.isRemoteIndex(index)) { + if (RemoteClusterLicenseChecker.isRemoteIndex(index)) { // We cannot verify remote indices continue; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseChecker.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseChecker.java deleted file mode 100644 index b0eeed2c800ec..0000000000000 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseChecker.java +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -package org.elasticsearch.xpack.ml.datafeed; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.license.License; -import org.elasticsearch.protocol.xpack.XPackInfoRequest; -import org.elasticsearch.protocol.xpack.XPackInfoResponse; -import org.elasticsearch.protocol.xpack.license.LicenseStatus; -import org.elasticsearch.transport.ActionNotFoundTransportException; -import org.elasticsearch.transport.RemoteClusterAware; -import org.elasticsearch.xpack.core.action.XPackInfoAction; - -import java.util.EnumSet; -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.atomic.AtomicReference; -import java.util.stream.Collectors; - -/** - * ML datafeeds can use cross cluster search to access data in a remote cluster. - * The remote cluster should be licenced for ML this class performs that check - * using the _xpack (info) endpoint. 
- */ -public class MlRemoteLicenseChecker { - - private final Client client; - - public static class RemoteClusterLicenseInfo { - private final String clusterName; - private final XPackInfoResponse.LicenseInfo licenseInfo; - - RemoteClusterLicenseInfo(String clusterName, XPackInfoResponse.LicenseInfo licenseInfo) { - this.clusterName = clusterName; - this.licenseInfo = licenseInfo; - } - - public String getClusterName() { - return clusterName; - } - - public XPackInfoResponse.LicenseInfo getLicenseInfo() { - return licenseInfo; - } - } - - public class LicenseViolation { - private final RemoteClusterLicenseInfo licenseInfo; - - private LicenseViolation(@Nullable RemoteClusterLicenseInfo licenseInfo) { - this.licenseInfo = licenseInfo; - } - - public boolean isViolated() { - return licenseInfo != null; - } - - public RemoteClusterLicenseInfo get() { - return licenseInfo; - } - } - - public MlRemoteLicenseChecker(Client client) { - this.client = client; - } - - /** - * Check each cluster is licensed for ML. - * This function evaluates lazily and will terminate when the first cluster - * that is not licensed is found or an error occurs. - * - * @param clusterNames List of remote cluster names - * @param listener Response listener - */ - public void checkRemoteClusterLicenses(List clusterNames, ActionListener listener) { - final Iterator itr = clusterNames.iterator(); - if (itr.hasNext() == false) { - listener.onResponse(new LicenseViolation(null)); - return; - } - - final AtomicReference clusterName = new AtomicReference<>(itr.next()); - - ActionListener infoListener = new ActionListener() { - @Override - public void onResponse(XPackInfoResponse xPackInfoResponse) { - if (licenseSupportsML(xPackInfoResponse.getLicenseInfo()) == false) { - listener.onResponse(new LicenseViolation( - new RemoteClusterLicenseInfo(clusterName.get(), xPackInfoResponse.getLicenseInfo()))); - return; - } - - if (itr.hasNext()) { - clusterName.set(itr.next()); - remoteClusterLicense(clusterName.get(), this); - } else { - listener.onResponse(new LicenseViolation(null)); - } - } - - @Override - public void onFailure(Exception e) { - String message = "Could not determine the X-Pack licence type for cluster [" + clusterName.get() + "]"; - if (e instanceof ActionNotFoundTransportException) { - // This is likely to be because x-pack is not installed in the target cluster - message += ". Is X-Pack installed on the target cluster?"; - } - listener.onFailure(new ElasticsearchException(message, e)); - } - }; - - remoteClusterLicense(clusterName.get(), infoListener); - } - - private void remoteClusterLicense(String clusterName, ActionListener listener) { - Client remoteClusterClient = client.getRemoteClusterClient(clusterName); - ThreadContext threadContext = remoteClusterClient.threadPool().getThreadContext(); - try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - // we stash any context here since this is an internal execution and should not leak any - // existing context information. 
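The stash-and-mark idiom used by the method being deleted here is worth keeping in view, since the replacement checker relies on the same pattern. In isolation (remoteClusterClient and listener are assumed to be in scope):

    import java.util.EnumSet;

    import org.elasticsearch.common.util.concurrent.ThreadContext;
    import org.elasticsearch.protocol.xpack.XPackInfoRequest;
    import org.elasticsearch.xpack.core.action.XPackInfoAction;

    final ThreadContext threadContext = remoteClusterClient.threadPool().getThreadContext();
    try (ThreadContext.StoredContext ignore = threadContext.stashContext()) {
        // stash the caller's context so none of its headers leak into the remote
        // call, then mark the execution as an internal system action
        threadContext.markAsSystemContext();
        final XPackInfoRequest request = new XPackInfoRequest();
        request.setCategories(EnumSet.of(XPackInfoRequest.Category.LICENSE));
        remoteClusterClient.execute(XPackInfoAction.INSTANCE, request, listener);
    }

The try-with-resources restores the caller's context when the block exits, so only the remote _xpack info call runs under the system context.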
- threadContext.markAsSystemContext(); - - XPackInfoRequest request = new XPackInfoRequest(); - request.setCategories(EnumSet.of(XPackInfoRequest.Category.LICENSE)); - remoteClusterClient.execute(XPackInfoAction.INSTANCE, request, listener); - } - } - - static boolean licenseSupportsML(XPackInfoResponse.LicenseInfo licenseInfo) { - License.OperationMode mode = License.OperationMode.resolve(licenseInfo.getMode()); - return licenseInfo.getStatus() == LicenseStatus.ACTIVE && - (mode == License.OperationMode.PLATINUM || mode == License.OperationMode.TRIAL); - } - - public static boolean isRemoteIndex(String index) { - return index.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR) != -1; - } - - public static boolean containsRemoteIndex(List indices) { - return indices.stream().anyMatch(MlRemoteLicenseChecker::isRemoteIndex); - } - - /** - * Get any remote indices used in cross cluster search. - * Remote indices are of the form {@code cluster_name:index_name} - * @return List of remote cluster indices - */ - public static List remoteIndices(List indices) { - return indices.stream().filter(MlRemoteLicenseChecker::isRemoteIndex).collect(Collectors.toList()); - } - - /** - * Extract the list of remote cluster names from the list of indices. - * @param indices List of indices. Remote cluster indices are prefixed - * with {@code cluster-name:} - * @return Every cluster name found in {@code indices} - */ - public static List remoteClusterNames(List indices) { - return indices.stream() - .filter(MlRemoteLicenseChecker::isRemoteIndex) - .map(index -> index.substring(0, index.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR))) - .distinct() - .collect(Collectors.toList()); - } - - public static String buildErrorMessage(RemoteClusterLicenseInfo clusterLicenseInfo) { - StringBuilder error = new StringBuilder(); - if (clusterLicenseInfo.licenseInfo.getStatus() != LicenseStatus.ACTIVE) { - error.append("The license on cluster [").append(clusterLicenseInfo.clusterName) - .append("] is not active. "); - } else { - License.OperationMode mode = License.OperationMode.resolve(clusterLicenseInfo.licenseInfo.getMode()); - if (mode != License.OperationMode.PLATINUM && mode != License.OperationMode.TRIAL) { - error.append("The license mode [").append(mode) - .append("] on cluster [") - .append(clusterLicenseInfo.clusterName) - .append("] does not enable Machine Learning. "); - } - } - - error.append(Strings.toString(clusterLicenseInfo.licenseInfo)); - return error.toString(); - } -} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java index 72c8d361dd882..610a5c1b92fb6 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java @@ -3,10 +3,12 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
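The helpers deleted above (and re-homed on RemoteClusterLicenseChecker) all derive from the naming convention that a remote index is cluster_alias:index_name. A condensed sketch of the same stream pipeline, using the separator constant from the deleted code:

    import java.util.Arrays;
    import java.util.List;
    import java.util.stream.Collectors;

    import org.elasticsearch.transport.RemoteClusterAware;

    static boolean isRemoteIndex(final String index) {
        // remote indices carry a "cluster_alias:" prefix; the separator is ':'
        return index.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR) != -1;
    }

    static List<String> remoteClusterAliases(final List<String> indices) {
        return indices.stream()
                .filter(index -> isRemoteIndex(index))
                .map(index -> index.substring(0, index.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR)))
                .distinct()
                .collect(Collectors.toList());
    }

    // remoteClusterAliases(Arrays.asList("local", "c1:idx", "c2:idx1", "c2:idx2")) -> [c1, c2]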
*/ + package org.elasticsearch.xpack.ml.action; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.MlMetadata; @@ -14,7 +16,6 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.xpack.ml.datafeed.DatafeedManager; import org.elasticsearch.xpack.ml.datafeed.DatafeedManagerTests; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseCheckerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseCheckerTests.java deleted file mode 100644 index 81e4c75cfad7c..0000000000000 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/MlRemoteLicenseCheckerTests.java +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -package org.elasticsearch.xpack.ml.datafeed; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.protocol.xpack.XPackInfoResponse; -import org.elasticsearch.protocol.xpack.license.LicenseStatus; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.action.XPackInfoAction; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; - -import static org.hamcrest.Matchers.contains; -import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.is; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyString; -import static org.mockito.Matchers.same; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public class MlRemoteLicenseCheckerTests extends ESTestCase { - - public void testIsRemoteIndex() { - List indices = Arrays.asList("local-index1", "local-index2"); - assertFalse(MlRemoteLicenseChecker.containsRemoteIndex(indices)); - indices = Arrays.asList("local-index1", "remote-cluster:remote-index2"); - assertTrue(MlRemoteLicenseChecker.containsRemoteIndex(indices)); - } - - public void testRemoteIndices() { - List indices = Collections.singletonList("local-index"); - assertThat(MlRemoteLicenseChecker.remoteIndices(indices), is(empty())); - indices = Arrays.asList("local-index", "remote-cluster:index1", "local-index2", "remote-cluster2:index1"); - assertThat(MlRemoteLicenseChecker.remoteIndices(indices), containsInAnyOrder("remote-cluster:index1", "remote-cluster2:index1")); - } - - public void 
testRemoteClusterNames() { - List indices = Arrays.asList("local-index1", "local-index2"); - assertThat(MlRemoteLicenseChecker.remoteClusterNames(indices), empty()); - indices = Arrays.asList("local-index1", "remote-cluster1:remote-index2"); - assertThat(MlRemoteLicenseChecker.remoteClusterNames(indices), contains("remote-cluster1")); - indices = Arrays.asList("remote-cluster1:index2", "index1", "remote-cluster2:index1"); - assertThat(MlRemoteLicenseChecker.remoteClusterNames(indices), contains("remote-cluster1", "remote-cluster2")); - indices = Arrays.asList("remote-cluster1:index2", "index1", "remote-cluster2:index1", "remote-cluster2:index2"); - assertThat(MlRemoteLicenseChecker.remoteClusterNames(indices), contains("remote-cluster1", "remote-cluster2")); - } - - public void testLicenseSupportsML() { - XPackInfoResponse.LicenseInfo licenseInfo = new XPackInfoResponse.LicenseInfo("uid", "trial", "trial", - LicenseStatus.ACTIVE, randomNonNegativeLong()); - assertTrue(MlRemoteLicenseChecker.licenseSupportsML(licenseInfo)); - - licenseInfo = new XPackInfoResponse.LicenseInfo("uid", "trial", "trial", LicenseStatus.EXPIRED, randomNonNegativeLong()); - assertFalse(MlRemoteLicenseChecker.licenseSupportsML(licenseInfo)); - - licenseInfo = new XPackInfoResponse.LicenseInfo("uid", "GOLD", "GOLD", LicenseStatus.ACTIVE, randomNonNegativeLong()); - assertFalse(MlRemoteLicenseChecker.licenseSupportsML(licenseInfo)); - - licenseInfo = new XPackInfoResponse.LicenseInfo("uid", "PLATINUM", "PLATINUM", LicenseStatus.ACTIVE, randomNonNegativeLong()); - assertTrue(MlRemoteLicenseChecker.licenseSupportsML(licenseInfo)); - } - - public void testCheckRemoteClusterLicenses_givenValidLicenses() { - final AtomicInteger index = new AtomicInteger(0); - final List responses = new ArrayList<>(); - - Client client = createMockClient(); - doAnswer(invocationMock -> { - @SuppressWarnings("raw_types") - ActionListener listener = (ActionListener) invocationMock.getArguments()[2]; - listener.onResponse(responses.get(index.getAndIncrement())); - return null; - }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); - - - List remoteClusterNames = Arrays.asList("valid1", "valid2", "valid3"); - responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); - responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); - responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); - - MlRemoteLicenseChecker licenseChecker = new MlRemoteLicenseChecker(client); - AtomicReference licCheckResponse = new AtomicReference<>(); - - licenseChecker.checkRemoteClusterLicenses(remoteClusterNames, - new ActionListener() { - @Override - public void onResponse(MlRemoteLicenseChecker.LicenseViolation response) { - licCheckResponse.set(response); - } - - @Override - public void onFailure(Exception e) { - fail(e.getMessage()); - } - }); - - verify(client, times(3)).execute(same(XPackInfoAction.INSTANCE), any(), any()); - assertNotNull(licCheckResponse.get()); - assertFalse(licCheckResponse.get().isViolated()); - assertNull(licCheckResponse.get().get()); - } - - public void testCheckRemoteClusterLicenses_givenInvalidLicense() { - final AtomicInteger index = new AtomicInteger(0); - List remoteClusterNames = Arrays.asList("good", "cluster-with-basic-license", "good2"); - final List responses = new ArrayList<>(); - responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); - responses.add(new XPackInfoResponse(null, createBasicLicenseResponse(), 
null)); - responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); - - Client client = createMockClient(); - doAnswer(invocationMock -> { - @SuppressWarnings("raw_types") - ActionListener listener = (ActionListener) invocationMock.getArguments()[2]; - listener.onResponse(responses.get(index.getAndIncrement())); - return null; - }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); - - MlRemoteLicenseChecker licenseChecker = new MlRemoteLicenseChecker(client); - AtomicReference licCheckResponse = new AtomicReference<>(); - - licenseChecker.checkRemoteClusterLicenses(remoteClusterNames, - new ActionListener() { - @Override - public void onResponse(MlRemoteLicenseChecker.LicenseViolation response) { - licCheckResponse.set(response); - } - - @Override - public void onFailure(Exception e) { - fail(e.getMessage()); - } - }); - - verify(client, times(2)).execute(same(XPackInfoAction.INSTANCE), any(), any()); - assertNotNull(licCheckResponse.get()); - assertTrue(licCheckResponse.get().isViolated()); - assertEquals("cluster-with-basic-license", licCheckResponse.get().get().getClusterName()); - assertEquals("BASIC", licCheckResponse.get().get().getLicenseInfo().getType()); - } - - public void testBuildErrorMessage() { - XPackInfoResponse.LicenseInfo platinumLicence = createPlatinumLicenseResponse(); - MlRemoteLicenseChecker.RemoteClusterLicenseInfo info = - new MlRemoteLicenseChecker.RemoteClusterLicenseInfo("platinum-cluster", platinumLicence); - assertEquals(Strings.toString(platinumLicence), MlRemoteLicenseChecker.buildErrorMessage(info)); - - XPackInfoResponse.LicenseInfo basicLicense = createBasicLicenseResponse(); - info = new MlRemoteLicenseChecker.RemoteClusterLicenseInfo("basic-cluster", basicLicense); - String expected = "The license mode [BASIC] on cluster [basic-cluster] does not enable Machine Learning. " - + Strings.toString(basicLicense); - assertEquals(expected, MlRemoteLicenseChecker.buildErrorMessage(info)); - - XPackInfoResponse.LicenseInfo expiredLicense = createExpiredLicenseResponse(); - info = new MlRemoteLicenseChecker.RemoteClusterLicenseInfo("expired-cluster", expiredLicense); - expected = "The license on cluster [expired-cluster] is not active. 
" + Strings.toString(expiredLicense); - assertEquals(expected, MlRemoteLicenseChecker.buildErrorMessage(info)); - } - - private Client createMockClient() { - Client client = mock(Client.class); - ThreadPool threadPool = mock(ThreadPool.class); - when(client.threadPool()).thenReturn(threadPool); - when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); - when(client.getRemoteClusterClient(anyString())).thenReturn(client); - return client; - } - - private XPackInfoResponse.LicenseInfo createPlatinumLicenseResponse() { - return new XPackInfoResponse.LicenseInfo("uid", "PLATINUM", "PLATINUM", LicenseStatus.ACTIVE, randomNonNegativeLong()); - } - - private XPackInfoResponse.LicenseInfo createBasicLicenseResponse() { - return new XPackInfoResponse.LicenseInfo("uid", "BASIC", "BASIC", LicenseStatus.ACTIVE, randomNonNegativeLong()); - } - - private XPackInfoResponse.LicenseInfo createExpiredLicenseResponse() { - return new XPackInfoResponse.LicenseInfo("uid", "PLATINUM", "PLATINUM", LicenseStatus.EXPIRED, randomNonNegativeLong()); - } -} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java index 7ed00a01f3565..c83521591fcf4 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java @@ -392,7 +392,7 @@ public void testResetScrollAfterSearchPhaseExecutionException() throws IOExcepti assertThat(extractor.hasNext(), is(true)); output = extractor.next(); assertThat(output.isPresent(), is(true)); - assertEquals(new Long(1400L), extractor.getLastTimestamp()); + assertEquals(Long.valueOf(1400L), extractor.getLastTimestamp()); // A second failure is not tolerated assertThat(extractor.hasNext(), is(true)); expectThrows(SearchPhaseExecutionException.class, extractor::next); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java index 303fce3d81321..e36c313b626c9 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java @@ -125,7 +125,7 @@ public void testGetCalandarByJobId() throws Exception { Long matchedCount = queryResult.stream().filter( c -> c.getId().equals("foo calendar") || c.getId().equals("foo bar calendar") || c.getId().equals("cat foo calendar")) .count(); - assertEquals(new Long(3), matchedCount); + assertEquals(Long.valueOf(3), matchedCount); queryResult = getCalendars("bar"); assertThat(queryResult, hasSize(1)); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheck.java index cd5a720eef1dc..28f2756cf262c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheck.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheck.java @@ -13,12 +13,6 @@ public class FIPS140JKSKeystoreBootstrapCheck implements BootstrapCheck { - 
private final boolean fipsModeEnabled; - - FIPS140JKSKeystoreBootstrapCheck(Settings settings) { - this.fipsModeEnabled = XPackSettings.FIPS_MODE_ENABLED.get(settings); - } - /** * Test if the node fails the check. * @@ -28,7 +22,7 @@ public class FIPS140JKSKeystoreBootstrapCheck implements BootstrapCheck { @Override public BootstrapCheckResult check(BootstrapContext context) { - if (fipsModeEnabled) { + if (XPackSettings.FIPS_MODE_ENABLED.get(context.settings)) { final Settings settings = context.settings; Settings keystoreTypeSettings = settings.filter(k -> k.endsWith("keystore.type")) .filter(k -> settings.get(k).equalsIgnoreCase("jks")); @@ -50,6 +44,6 @@ public BootstrapCheckResult check(BootstrapContext context) { @Override public boolean alwaysEnforce() { - return fipsModeEnabled; + return true; } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheck.java index d1bce0dcdd24c..957276bdad2f3 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheck.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheck.java @@ -10,6 +10,7 @@ import org.elasticsearch.bootstrap.BootstrapContext; import org.elasticsearch.license.License; import org.elasticsearch.license.LicenseService; +import org.elasticsearch.xpack.core.XPackSettings; import java.util.EnumSet; @@ -21,15 +22,9 @@ final class FIPS140LicenseBootstrapCheck implements BootstrapCheck { static final EnumSet ALLOWED_LICENSE_OPERATION_MODES = EnumSet.of(License.OperationMode.PLATINUM, License.OperationMode.TRIAL); - private final boolean isInFipsMode; - - FIPS140LicenseBootstrapCheck(boolean isInFipsMode) { - this.isInFipsMode = isInFipsMode; - } - @Override public BootstrapCheckResult check(BootstrapContext context) { - if (isInFipsMode) { + if (XPackSettings.FIPS_MODE_ENABLED.get(context.settings)) { License license = LicenseService.getLicense(context.metaData); if (license != null && ALLOWED_LICENSE_OPERATION_MODES.contains(license.operationMode()) == false) { return BootstrapCheckResult.failure("FIPS mode is only allowed with a Platinum or Trial license"); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheck.java index 751d63be4fb3f..3faec3d747575 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheck.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheck.java @@ -7,19 +7,12 @@ import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.bootstrap.BootstrapContext; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xpack.core.XPackSettings; import java.util.Locale; public class FIPS140PasswordHashingAlgorithmBootstrapCheck implements BootstrapCheck { - private final boolean fipsModeEnabled; - - FIPS140PasswordHashingAlgorithmBootstrapCheck(final Settings settings) { - this.fipsModeEnabled = XPackSettings.FIPS_MODE_ENABLED.get(settings); - } - /** * Test if the node fails the check. 
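All three FIPS bootstrap checks in this patch converge on the same stateless shape: no Settings captured in a constructor, the flag read from the context at check time, and a constant alwaysEnforce(). A minimal sketch of that shape (the class name is illustrative; the success/failure factories are the ones the real checks use):

    import org.elasticsearch.bootstrap.BootstrapCheck;
    import org.elasticsearch.bootstrap.BootstrapContext;
    import org.elasticsearch.xpack.core.XPackSettings;

    public class ExampleFipsBootstrapCheck implements BootstrapCheck {

        @Override
        public BootstrapCheckResult check(final BootstrapContext context) {
            if (XPackSettings.FIPS_MODE_ENABLED.get(context.settings) == false) {
                // not in FIPS mode: nothing to enforce
                return BootstrapCheckResult.success();
            }
            // FIPS-specific validation against context.settings goes here
            return BootstrapCheckResult.success();
        }

        @Override
        public boolean alwaysEnforce() {
            return true;
        }
    }

Reading the flag from context.settings is what makes the parameterless constructors in Security.java possible, and it moves the FIPS decision to the moment the check actually runs.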
* @@ -28,7 +21,7 @@ public class FIPS140PasswordHashingAlgorithmBootstrapCheck implements BootstrapC */ @Override public BootstrapCheckResult check(final BootstrapContext context) { - if (fipsModeEnabled) { + if (XPackSettings.FIPS_MODE_ENABLED.get(context.settings)) { final String selectedAlgorithm = XPackSettings.PASSWORD_HASHING_ALGORITHM.get(context.settings); if (selectedAlgorithm.toLowerCase(Locale.ROOT).startsWith("pbkdf2") == false) { return BootstrapCheckResult.failure("Only PBKDF2 is allowed for password hashing in a FIPS-140 JVM. Please set the " + diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index f99d7b8e729a4..347f5b620781b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -309,9 +309,9 @@ public Security(Settings settings, final Path configPath) { new PkiRealmBootstrapCheck(getSslService()), new TLSLicenseBootstrapCheck(), new FIPS140SecureSettingsBootstrapCheck(settings, env), - new FIPS140JKSKeystoreBootstrapCheck(settings), - new FIPS140PasswordHashingAlgorithmBootstrapCheck(settings), - new FIPS140LicenseBootstrapCheck(XPackSettings.FIPS_MODE_ENABLED.get(settings)))); + new FIPS140JKSKeystoreBootstrapCheck(), + new FIPS140PasswordHashingAlgorithmBootstrapCheck(), + new FIPS140LicenseBootstrapCheck())); checks.addAll(InternalRealms.getBootstrapChecks(settings, env)); this.bootstrapChecks = Collections.unmodifiableList(checks); Automatons.updateMaxDeterminizedStates(settings); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java index 77170f7a1cbfb..a343ee47d4240 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java @@ -31,7 +31,6 @@ import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest; import org.elasticsearch.xpack.core.security.authz.IndicesAndAliasesResolverField; -import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -441,7 +440,7 @@ protected Set getRemoteClusterNames() { } @Override - protected void updateRemoteCluster(String clusterAlias, List addresses) { + protected void updateRemoteCluster(String clusterAlias, List addresses) { if (addresses.isEmpty()) { clusters.remove(clusterAlias); } else { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheckTests.java index 1d4da71e11b5e..b659adf22cfc3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheckTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140JKSKeystoreBootstrapCheckTests.java @@ -14,7 +14,7 @@ public class FIPS140JKSKeystoreBootstrapCheckTests extends ESTestCase { public void testNoKeystoreIsAllowed() { final Settings.Builder settings = Settings.builder() .put("xpack.security.fips_mode.enabled", "true"); - assertFalse(new 
FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new BootstrapContext(settings.build(), null)).isFailure()); + assertFalse(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure()); } public void testSSLKeystoreTypeIsNotAllowed() { @@ -22,7 +22,7 @@ public void testSSLKeystoreTypeIsNotAllowed() { .put("xpack.security.fips_mode.enabled", "true") .put("xpack.ssl.keystore.path", "/this/is/the/path") .put("xpack.ssl.keystore.type", "JKS"); - assertTrue(new FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new BootstrapContext(settings.build(), null)).isFailure()); + assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure()); } public void testSSLImplicitKeystoreTypeIsNotAllowed() { @@ -30,7 +30,7 @@ public void testSSLImplicitKeystoreTypeIsNotAllowed() { .put("xpack.security.fips_mode.enabled", "true") .put("xpack.ssl.keystore.path", "/this/is/the/path") .put("xpack.ssl.keystore.type", "JKS"); - assertTrue(new FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new BootstrapContext(settings.build(), null)).isFailure()); + assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure()); } public void testTransportSSLKeystoreTypeIsNotAllowed() { @@ -38,7 +38,7 @@ public void testTransportSSLKeystoreTypeIsNotAllowed() { .put("xpack.security.fips_mode.enabled", "true") .put("xpack.security.transport.ssl.keystore.path", "/this/is/the/path") .put("xpack.security.transport.ssl.keystore.type", "JKS"); - assertTrue(new FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new BootstrapContext(settings.build(), null)).isFailure()); + assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure()); } public void testHttpSSLKeystoreTypeIsNotAllowed() { @@ -46,7 +46,7 @@ public void testHttpSSLKeystoreTypeIsNotAllowed() { .put("xpack.security.fips_mode.enabled", "true") .put("xpack.security.http.ssl.keystore.path", "/this/is/the/path") .put("xpack.security.http.ssl.keystore.type", "JKS"); - assertTrue(new FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new BootstrapContext(settings.build(), null)).isFailure()); + assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure()); } public void testRealmKeystoreTypeIsNotAllowed() { @@ -54,13 +54,13 @@ public void testRealmKeystoreTypeIsNotAllowed() { .put("xpack.security.fips_mode.enabled", "true") .put("xpack.security.authc.realms.ldap.ssl.keystore.path", "/this/is/the/path") .put("xpack.security.authc.realms.ldap.ssl.keystore.type", "JKS"); - assertTrue(new FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new BootstrapContext(settings.build(), null)).isFailure()); + assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure()); } public void testImplicitRealmKeystoreTypeIsNotAllowed() { final Settings.Builder settings = Settings.builder() .put("xpack.security.fips_mode.enabled", "true") .put("xpack.security.authc.realms.ldap.ssl.keystore.path", "/this/is/the/path"); - assertTrue(new FIPS140JKSKeystoreBootstrapCheck(settings.build()).check(new BootstrapContext(settings.build(), null)).isFailure()); + assertTrue(new FIPS140JKSKeystoreBootstrapCheck().check(new BootstrapContext(settings.build(), null)).isFailure()); } } diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheckTests.java index a2ec8f9fb2053..fb4c9e21a258f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheckTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140LicenseBootstrapCheckTests.java @@ -17,27 +17,29 @@ public class FIPS140LicenseBootstrapCheckTests extends ESTestCase { public void testBootstrapCheck() throws Exception { - assertTrue(new FIPS140LicenseBootstrapCheck(false) - .check(new BootstrapContext(Settings.EMPTY, MetaData.EMPTY_META_DATA)).isSuccess()); - assertTrue(new FIPS140LicenseBootstrapCheck(randomBoolean()) + assertTrue(new FIPS140LicenseBootstrapCheck() .check(new BootstrapContext(Settings.EMPTY, MetaData.EMPTY_META_DATA)).isSuccess()); + assertTrue(new FIPS140LicenseBootstrapCheck() + .check(new BootstrapContext(Settings.builder().put("xpack.security.fips_mode.enabled", randomBoolean()).build(), MetaData + .EMPTY_META_DATA)).isSuccess()); - License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(24)); MetaData.Builder builder = MetaData.builder(); + License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(24)); TestUtils.putLicense(builder, license); MetaData metaData = builder.build(); + if (FIPS140LicenseBootstrapCheck.ALLOWED_LICENSE_OPERATION_MODES.contains(license.operationMode())) { - assertTrue(new FIPS140LicenseBootstrapCheck(true).check(new BootstrapContext( + assertTrue(new FIPS140LicenseBootstrapCheck().check(new BootstrapContext( Settings.builder().put("xpack.security.fips_mode.enabled", true).build(), metaData)).isSuccess()); - assertTrue(new FIPS140LicenseBootstrapCheck(false).check(new BootstrapContext( + assertTrue(new FIPS140LicenseBootstrapCheck().check(new BootstrapContext( Settings.builder().put("xpack.security.fips_mode.enabled", false).build(), metaData)).isSuccess()); } else { - assertTrue(new FIPS140LicenseBootstrapCheck(false).check(new BootstrapContext( + assertTrue(new FIPS140LicenseBootstrapCheck().check(new BootstrapContext( Settings.builder().put("xpack.security.fips_mode.enabled", false).build(), metaData)).isSuccess()); - assertTrue(new FIPS140LicenseBootstrapCheck(true).check(new BootstrapContext( + assertTrue(new FIPS140LicenseBootstrapCheck().check(new BootstrapContext( Settings.builder().put("xpack.security.fips_mode.enabled", true).build(), metaData)).isFailure()); assertEquals("FIPS mode is only allowed with a Platinum or Trial license", - new FIPS140LicenseBootstrapCheck(true).check(new BootstrapContext( + new FIPS140LicenseBootstrapCheck().check(new BootstrapContext( Settings.builder().put("xpack.security.fips_mode.enabled", true).build(), metaData)).getMessage()); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheckTests.java index 8632400866a07..6376ca211dcae 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheckTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/FIPS140PasswordHashingAlgorithmBootstrapCheckTests.java @@ -25,7 +25,7 @@ public void testPBKDF2AlgorithmIsAllowed() { 
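The license gate exercised by the test below reduces to an EnumSet membership check over the allowed operation modes; condensed to its essence (names taken from the diff, wrapping class omitted):

    import java.util.EnumSet;

    import org.elasticsearch.license.License;

    static final EnumSet<License.OperationMode> ALLOWED_LICENSE_OPERATION_MODES =
            EnumSet.of(License.OperationMode.PLATINUM, License.OperationMode.TRIAL);

    static boolean fipsAllowedFor(final License license) {
        // a missing license is not a FIPS violation; only an explicitly
        // non-platinum, non-trial license fails the check
        return license == null || ALLOWED_LICENSE_OPERATION_MODES.contains(license.operationMode());
    }

This mirrors the check body: failure is reported only when a license is present and its mode falls outside the allowed set.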
.put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), "PBKDF2_10000") .build(); final BootstrapCheck.BootstrapCheckResult result = - new FIPS140PasswordHashingAlgorithmBootstrapCheck(settings).check(new BootstrapContext(settings, null)); + new FIPS140PasswordHashingAlgorithmBootstrapCheck().check(new BootstrapContext(settings, null)); assertFalse(result.isFailure()); } @@ -35,7 +35,7 @@ public void testPBKDF2AlgorithmIsAllowed() { .put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), "PBKDF2") .build(); final BootstrapCheck.BootstrapCheckResult result = - new FIPS140PasswordHashingAlgorithmBootstrapCheck(settings).check(new BootstrapContext(settings, null)); + new FIPS140PasswordHashingAlgorithmBootstrapCheck().check(new BootstrapContext(settings, null)); assertFalse(result.isFailure()); } } @@ -55,7 +55,7 @@ private void runBCRYPTTest(final boolean fipsModeEnabled, final String passwordH } final Settings settings = builder.build(); final BootstrapCheck.BootstrapCheckResult result = - new FIPS140PasswordHashingAlgorithmBootstrapCheck(settings).check(new BootstrapContext(settings, null)); + new FIPS140PasswordHashingAlgorithmBootstrapCheck().check(new BootstrapContext(settings, null)); assertThat(result.isFailure(), equalTo(fipsModeEnabled)); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java index 39d62c7bcea98..2bd1bdf906ad8 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java @@ -71,6 +71,9 @@ public abstract class KerberosTestCase extends ESTestCase { unsupportedLocaleLanguages.add("fa"); unsupportedLocaleLanguages.add("ks"); unsupportedLocaleLanguages.add("ckb"); + unsupportedLocaleLanguages.add("ne"); + unsupportedLocaleLanguages.add("dz"); + unsupportedLocaleLanguages.add("mzn"); } @BeforeClass @@ -144,7 +147,7 @@ protected Path createPrincipalKeyTab(final Path dir, final String... princNames) /** * Creates principal with given name and password. - * + * * @param principalName Principal name * @param password Password * @throws Exception thrown if principal could not be created @@ -178,7 +181,7 @@ static T doAsWrapper(final Subject subject, final PrivilegedExceptionAction< /** * Write content to provided keytab file. - * + * * @param keytabPath {@link Path} to keytab file. 
* @param content Content for keytab * @return key tab path @@ -193,7 +196,7 @@ public static Path writeKeyTab(final Path keytabPath, final String content) thro /** * Build kerberos realm settings with default config and given keytab - * + * * @param keytabPath key tab file path * @return {@link Settings} for kerberos realm */ @@ -203,7 +206,7 @@ public static Settings buildKerberosRealmSettings(final String keytabPath) { /** * Build kerberos realm settings - * + * * @param keytabPath key tab file path * @param maxUsersInCache max users to be maintained in cache * @param cacheTTL time to live for cached entries diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java index aa9d434f332e3..3b5180b71f7c2 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java @@ -58,7 +58,7 @@ private TypeConverter() { private static final long DAY_IN_MILLIS = 60 * 60 * 24; private static final Map, JDBCType> javaToJDBC; - + static { Map, JDBCType> aMap = Arrays.stream(DataType.values()) .filter(dataType -> dataType.javaClass() != null @@ -119,7 +119,7 @@ private static T dateTimeConvert(Long millis, Calendar c, Function T convert(Object val, JDBCType columnType, Class type) throws SQLE if (type == null) { return (T) convert(val, columnType); } - + if (type.isInstance(val)) { try { return type.cast(val); @@ -150,7 +150,7 @@ static T convert(Object val, JDBCType columnType, Class type) throws SQLE throw new SQLDataException("Unable to convert " + val.getClass().getName() + " to " + columnType, cce); } } - + if (type == String.class) { return (T) asString(convert(val, columnType)); } @@ -276,8 +276,8 @@ static boolean isSigned(JDBCType jdbcType) throws SQLException { } return dataType.isSigned(); } - - + + static JDBCType fromJavaToJDBC(Class clazz) throws SQLException { for (Entry, JDBCType> e : javaToJDBC.entrySet()) { // java.util.Calendar from {@code javaToJDBC} is an abstract class and this method can be used with concrete classes as well @@ -285,7 +285,7 @@ static JDBCType fromJavaToJDBC(Class clazz) throws SQLException { return e.getValue(); } } - + throw new SQLFeatureNotSupportedException("Objects of type " + clazz.getName() + " are not supported"); } @@ -432,7 +432,7 @@ private static Float asFloat(Object val, JDBCType columnType) throws SQLExceptio case REAL: case FLOAT: case DOUBLE: - return new Float(((Number) val).doubleValue()); + return Float.valueOf((((float) ((Number) val).doubleValue()))); default: } @@ -451,7 +451,7 @@ private static Double asDouble(Object val, JDBCType columnType) throws SQLExcept case REAL: case FLOAT: case DOUBLE: - return new Double(((Number) val).doubleValue()); + return Double.valueOf(((Number) val).doubleValue()); default: } @@ -539,4 +539,4 @@ private static long safeToLong(double x) throws SQLException { } return Math.round(x); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcPreparedStatementTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcPreparedStatementTests.java index ad96825896e1a..9da06f6537c0e 100644 --- a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcPreparedStatementTests.java +++ 
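The new Float/new Double replacements in the TypeConverter hunk above (and the Long.valueOf changes in the earlier test hunks) are behavior-preserving cleanups: the boxed-primitive constructors are deprecated as of Java 9, and the valueOf factories can reuse cached instances. For example:

    // Long.valueOf may serve small values (-128..127) from a cache; the
    // deprecated constructor always allocates a fresh object
    Long a = Long.valueOf(127L);
    Long b = Long.valueOf(127L);
    assert a.equals(b); // value equality holds either way

Note that new Float(double) also narrowed its argument to float, so the explicit (float) cast added in asFloat keeps the old semantics exactly.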
b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcPreparedStatementTests.java @@ -38,27 +38,27 @@ import static java.sql.JDBCType.VARCHAR; public class JdbcPreparedStatementTests extends ESTestCase { - + public void testSettingBooleanValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + jps.setBoolean(1, true); assertEquals(true, value(jps)); assertEquals(BOOLEAN, jdbcType(jps)); - + jps.setObject(1, false); assertEquals(false, value(jps)); assertEquals(BOOLEAN, jdbcType(jps)); - + jps.setObject(1, true, Types.BOOLEAN); assertEquals(true, value(jps)); assertEquals(BOOLEAN, jdbcType(jps)); assertTrue(value(jps) instanceof Boolean); - + jps.setObject(1, true, Types.INTEGER); assertEquals(1, value(jps)); assertEquals(INTEGER, jdbcType(jps)); - + jps.setObject(1, true, Types.VARCHAR); assertEquals("true", value(jps)); assertEquals(VARCHAR, jdbcType(jps)); @@ -66,264 +66,264 @@ public void testSettingBooleanValues() throws SQLException { public void testThrownExceptionsWhenSettingBooleanValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, true, Types.TIMESTAMP)); assertEquals("Conversion from type [BOOLEAN] to [Timestamp] not supported", sqle.getMessage()); } - + public void testSettingStringValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + jps.setString(1, "foo bar"); assertEquals("foo bar", value(jps)); assertEquals(VARCHAR, jdbcType(jps)); - + jps.setObject(1, "foo bar"); assertEquals("foo bar", value(jps)); assertEquals(VARCHAR, jdbcType(jps)); - + jps.setObject(1, "foo bar", Types.VARCHAR); assertEquals("foo bar", value(jps)); assertEquals(VARCHAR, jdbcType(jps)); assertTrue(value(jps) instanceof String); } - + public void testThrownExceptionsWhenSettingStringValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, "foo bar", Types.INTEGER)); assertEquals("Conversion from type [VARCHAR] to [Integer] not supported", sqle.getMessage()); } - + public void testSettingByteTypeValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + jps.setByte(1, (byte) 6); assertEquals((byte) 6, value(jps)); assertEquals(TINYINT, jdbcType(jps)); - + jps.setObject(1, (byte) 6); assertEquals((byte) 6, value(jps)); assertEquals(TINYINT, jdbcType(jps)); assertTrue(value(jps) instanceof Byte); - + jps.setObject(1, (byte) 0, Types.BOOLEAN); assertEquals(false, value(jps)); assertEquals(BOOLEAN, jdbcType(jps)); - + jps.setObject(1, (byte) 123, Types.BOOLEAN); assertEquals(true, value(jps)); assertEquals(BOOLEAN, jdbcType(jps)); - + jps.setObject(1, (byte) 123, Types.INTEGER); assertEquals(123, value(jps)); assertEquals(INTEGER, jdbcType(jps)); - + jps.setObject(1, (byte) -128, Types.DOUBLE); assertEquals(-128.0, value(jps)); assertEquals(DOUBLE, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingByteTypeValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, (byte) 6, Types.TIMESTAMP)); assertEquals("Conversion from type [TINYINT] to [Timestamp] not supported", sqle.getMessage()); } - + public void testSettingShortTypeValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + short 
someShort = randomShort(); jps.setShort(1, someShort); assertEquals(someShort, value(jps)); assertEquals(SMALLINT, jdbcType(jps)); - + jps.setObject(1, someShort); assertEquals(someShort, value(jps)); assertEquals(SMALLINT, jdbcType(jps)); assertTrue(value(jps) instanceof Short); - + jps.setObject(1, (short) 1, Types.BOOLEAN); assertEquals(true, value(jps)); assertEquals(BOOLEAN, jdbcType(jps)); - + jps.setObject(1, (short) -32700, Types.DOUBLE); assertEquals(-32700.0, value(jps)); assertEquals(DOUBLE, jdbcType(jps)); - + jps.setObject(1, someShort, Types.INTEGER); assertEquals((int) someShort, value(jps)); assertEquals(INTEGER, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingShortTypeValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, (short) 6, Types.TIMESTAMP)); assertEquals("Conversion from type [SMALLINT] to [Timestamp] not supported", sqle.getMessage()); - + sqle = expectThrows(SQLException.class, () -> jps.setObject(1, 256, Types.TINYINT)); assertEquals("Numeric " + 256 + " out of range", sqle.getMessage()); } - + public void testSettingIntegerValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + int someInt = randomInt(); jps.setInt(1, someInt); assertEquals(someInt, value(jps)); assertEquals(INTEGER, jdbcType(jps)); - + jps.setObject(1, someInt); assertEquals(someInt, value(jps)); assertEquals(INTEGER, jdbcType(jps)); assertTrue(value(jps) instanceof Integer); - + jps.setObject(1, someInt, Types.VARCHAR); assertEquals(String.valueOf(someInt), value(jps)); assertEquals(VARCHAR, jdbcType(jps)); - + jps.setObject(1, someInt, Types.FLOAT); assertEquals(Double.valueOf(someInt), value(jps)); assertTrue(value(jps) instanceof Double); assertEquals(FLOAT, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingIntegerValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); int someInt = randomInt(); - + SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, someInt, Types.TIMESTAMP)); assertEquals("Conversion from type [INTEGER] to [Timestamp] not supported", sqle.getMessage()); - + Integer randomIntNotShort = randomIntBetween(32768, Integer.MAX_VALUE); sqle = expectThrows(SQLException.class, () -> jps.setObject(1, randomIntNotShort, Types.SMALLINT)); assertEquals("Numeric " + randomIntNotShort + " out of range", sqle.getMessage()); - + sqle = expectThrows(SQLException.class, () -> jps.setObject(1, randomIntNotShort, Types.TINYINT)); assertEquals("Numeric " + randomIntNotShort + " out of range", sqle.getMessage()); } - + public void testSettingLongValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + long someLong = randomLong(); jps.setLong(1, someLong); assertEquals(someLong, value(jps)); assertEquals(BIGINT, jdbcType(jps)); - + jps.setObject(1, someLong); assertEquals(someLong, value(jps)); assertEquals(BIGINT, jdbcType(jps)); assertTrue(value(jps) instanceof Long); - + jps.setObject(1, someLong, Types.VARCHAR); assertEquals(String.valueOf(someLong), value(jps)); assertEquals(VARCHAR, jdbcType(jps)); - + jps.setObject(1, someLong, Types.DOUBLE); assertEquals((double) someLong, value(jps)); assertEquals(DOUBLE, jdbcType(jps)); - + jps.setObject(1, someLong, Types.FLOAT); assertEquals((double) someLong, value(jps)); assertEquals(FLOAT, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingLongValues() 
throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); long someLong = randomLong(); - + SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, someLong, Types.TIMESTAMP)); assertEquals("Conversion from type [BIGINT] to [Timestamp] not supported", sqle.getMessage()); - + Long randomLongNotShort = randomLongBetween(Integer.MAX_VALUE + 1, Long.MAX_VALUE); sqle = expectThrows(SQLException.class, () -> jps.setObject(1, randomLongNotShort, Types.INTEGER)); assertEquals("Numeric " + randomLongNotShort + " out of range", sqle.getMessage()); - + sqle = expectThrows(SQLException.class, () -> jps.setObject(1, randomLongNotShort, Types.SMALLINT)); assertEquals("Numeric " + randomLongNotShort + " out of range", sqle.getMessage()); } - + public void testSettingFloatValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + float someFloat = randomFloat(); jps.setFloat(1, someFloat); assertEquals(someFloat, value(jps)); assertEquals(REAL, jdbcType(jps)); - + jps.setObject(1, someFloat); assertEquals(someFloat, value(jps)); assertEquals(REAL, jdbcType(jps)); assertTrue(value(jps) instanceof Float); - + jps.setObject(1, someFloat, Types.VARCHAR); assertEquals(String.valueOf(someFloat), value(jps)); assertEquals(VARCHAR, jdbcType(jps)); - + jps.setObject(1, someFloat, Types.DOUBLE); assertEquals((double) someFloat, value(jps)); assertEquals(DOUBLE, jdbcType(jps)); - + jps.setObject(1, someFloat, Types.FLOAT); assertEquals((double) someFloat, value(jps)); assertEquals(FLOAT, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingFloatValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); float someFloat = randomFloat(); - + SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, someFloat, Types.TIMESTAMP)); assertEquals("Conversion from type [REAL] to [Timestamp] not supported", sqle.getMessage()); - + Float floatNotInt = 5_155_000_000f; sqle = expectThrows(SQLException.class, () -> jps.setObject(1, floatNotInt, Types.INTEGER)); - assertEquals(String.format(Locale.ROOT, "Numeric %s out of range", + assertEquals(String.format(Locale.ROOT, "Numeric %s out of range", Long.toString(Math.round(floatNotInt.doubleValue()))), sqle.getMessage()); - + sqle = expectThrows(SQLException.class, () -> jps.setObject(1, floatNotInt, Types.SMALLINT)); - assertEquals(String.format(Locale.ROOT, "Numeric %s out of range", + assertEquals(String.format(Locale.ROOT, "Numeric %s out of range", Long.toString(Math.round(floatNotInt.doubleValue()))), sqle.getMessage()); } - + public void testSettingDoubleValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - + double someDouble = randomDouble(); jps.setDouble(1, someDouble); assertEquals(someDouble, value(jps)); assertEquals(DOUBLE, jdbcType(jps)); - + jps.setObject(1, someDouble); assertEquals(someDouble, value(jps)); assertEquals(DOUBLE, jdbcType(jps)); assertTrue(value(jps) instanceof Double); - + jps.setObject(1, someDouble, Types.VARCHAR); assertEquals(String.valueOf(someDouble), value(jps)); assertEquals(VARCHAR, jdbcType(jps)); - + jps.setObject(1, someDouble, Types.REAL); - assertEquals(new Float(someDouble), value(jps)); + assertEquals((float) someDouble, value(jps)); assertEquals(REAL, jdbcType(jps)); } - + public void testThrownExceptionsWhenSettingDoubleValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); double someDouble = randomDouble(); - + 
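Read together, the long-value cases above pin down the driver's setObject contract: an explicit java.sql.Types target coerces the value (integral types sent as FLOAT are stored as Double), and narrowing conversions that cannot fit raise a "Numeric ... out of range" SQLException. From a caller's perspective (ps is assumed to be one of the driver's prepared statements; only standard JDBC API is used):

    import java.sql.PreparedStatement;
    import java.sql.SQLException;
    import java.sql.Types;

    void demonstrateConversions(final PreparedStatement ps) throws SQLException {
        ps.setObject(1, 42L, Types.VARCHAR); // stored as the string "42"
        ps.setObject(1, 42L, Types.FLOAT);   // stored as a Double, per the tests above
        try {
            // 5_155_000_000 does not fit in an int, so the driver rejects it
            ps.setObject(1, 5_155_000_000L, Types.INTEGER);
        } catch (final SQLException e) {
            // message: "Numeric 5155000000 out of range"
        }
    }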
SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, someDouble, Types.TIMESTAMP)); assertEquals("Conversion from type [DOUBLE] to [Timestamp] not supported", sqle.getMessage()); - + Double doubleNotInt = 5_155_000_000d; sqle = expectThrows(SQLException.class, () -> jps.setObject(1, doubleNotInt, Types.INTEGER)); - assertEquals(String.format(Locale.ROOT, "Numeric %s out of range", + assertEquals(String.format(Locale.ROOT, "Numeric %s out of range", Long.toString(((Number) doubleNotInt).longValue())), sqle.getMessage()); } - + public void testUnsupportedClasses() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); SQLFeatureNotSupportedException sfnse = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, new Struct() { @@ -341,23 +341,23 @@ public Object[] getAttributes() throws SQLException { } })); assertEquals("Objects of type java.sql.Struct are not supported", sfnse.getMessage()); - + sfnse = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, new URL("http://test"))); assertEquals("Objects of type java.net.URL are not supported", sfnse.getMessage()); - + sfnse = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setURL(1, new URL("http://test"))); assertEquals("Objects of type java.net.URL are not supported", sfnse.getMessage()); - + sfnse = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, this, Types.TIMESTAMP)); assertEquals("Conversion from type " + this.getClass().getName() + " to TIMESTAMP not supported", sfnse.getMessage()); - + SQLException se = expectThrows(SQLException.class, () -> jps.setObject(1, this, 1_000_000)); assertEquals("Type:1000000 is not a valid Types.java value.", se.getMessage()); - + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> jps.setObject(1, randomShort(), Types.CHAR)); assertEquals("Unsupported JDBC type [CHAR]", iae.getMessage()); } - + public void testSettingTimestampValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); @@ -365,186 +365,186 @@ public void testSettingTimestampValues() throws SQLException { jps.setTimestamp(1, someTimestamp); assertEquals(someTimestamp.getTime(), ((Date)value(jps)).getTime()); assertEquals(TIMESTAMP, jdbcType(jps)); - + Calendar nonDefaultCal = randomCalendar(); // February 29th, 2016. 
@@ -365,186 +365,186 @@ public void testSettingTimestampValues() throws SQLException {
         jps.setTimestamp(1, someTimestamp);
         assertEquals(someTimestamp.getTime(), ((Date)value(jps)).getTime());
         assertEquals(TIMESTAMP, jdbcType(jps));
-        
+
         Calendar nonDefaultCal = randomCalendar();
         // February 29th, 2016. 01:17:55 GMT = 1456708675000 millis since epoch
         jps.setTimestamp(1, new Timestamp(1456708675000L), nonDefaultCal);
         assertEquals(1456708675000L, convertFromUTCtoCalendar(((Date)value(jps)), nonDefaultCal));
         assertEquals(TIMESTAMP, jdbcType(jps));
-        
+
         long beforeEpochTime = -randomMillisSinceEpoch();
         jps.setTimestamp(1, new Timestamp(beforeEpochTime), nonDefaultCal);
         assertEquals(beforeEpochTime, convertFromUTCtoCalendar(((Date)value(jps)), nonDefaultCal));
         assertTrue(value(jps) instanceof java.util.Date);
-        
+
         jps.setObject(1, someTimestamp, Types.VARCHAR);
         assertEquals(someTimestamp.toString(), value(jps).toString());
         assertEquals(VARCHAR, jdbcType(jps));
     }
-    
+
     public void testThrownExceptionsWhenSettingTimestampValues() throws SQLException {
         JdbcPreparedStatement jps = createJdbcPreparedStatement();
         Timestamp someTimestamp = new Timestamp(randomMillisSinceEpoch());
-        
+
         SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, someTimestamp, Types.INTEGER));
         assertEquals("Conversion from type java.sql.Timestamp to INTEGER not supported", sqle.getMessage());
     }
 
     public void testSettingTimeValues() throws SQLException {
         JdbcPreparedStatement jps = createJdbcPreparedStatement();
-        
+
         Time time = new Time(4675000);
         Calendar nonDefaultCal = randomCalendar();
         jps.setTime(1, time, nonDefaultCal);
         assertEquals(4675000, convertFromUTCtoCalendar(((Date)value(jps)), nonDefaultCal));
         assertEquals(TIMESTAMP, jdbcType(jps));
         assertTrue(value(jps) instanceof java.util.Date);
-        
+
         jps.setObject(1, time, Types.VARCHAR);
         assertEquals(time.toString(), value(jps).toString());
         assertEquals(VARCHAR, jdbcType(jps));
     }
-    
+
     public void testThrownExceptionsWhenSettingTimeValues() throws SQLException {
         JdbcPreparedStatement jps = createJdbcPreparedStatement();
         Time time = new Time(4675000);
-        
+
         SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, time, Types.INTEGER));
         assertEquals("Conversion from type java.sql.Time to INTEGER not supported", sqle.getMessage());
     }
-    
+
     public void testSettingSqlDateValues() throws SQLException {
         JdbcPreparedStatement jps = createJdbcPreparedStatement();
-        
+
         java.sql.Date someSqlDate = new java.sql.Date(randomMillisSinceEpoch());
         jps.setDate(1, someSqlDate);
         assertEquals(someSqlDate.getTime(), ((Date)value(jps)).getTime());
         assertEquals(TIMESTAMP, jdbcType(jps));
-        
+
         someSqlDate = new java.sql.Date(randomMillisSinceEpoch());
         Calendar nonDefaultCal = randomCalendar();
         jps.setDate(1, someSqlDate, nonDefaultCal);
         assertEquals(someSqlDate.getTime(), convertFromUTCtoCalendar(((Date)value(jps)), nonDefaultCal));
         assertEquals(TIMESTAMP, jdbcType(jps));
         assertTrue(value(jps) instanceof java.util.Date);
-        
+
         jps.setObject(1, someSqlDate, Types.VARCHAR);
         assertEquals(someSqlDate.toString(), value(jps).toString());
         assertEquals(VARCHAR, jdbcType(jps));
     }
-    
+
     public void testThrownExceptionsWhenSettingSqlDateValues() throws SQLException {
         JdbcPreparedStatement jps = createJdbcPreparedStatement();
         java.sql.Date someSqlDate = new java.sql.Date(randomMillisSinceEpoch());
-        
-        SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, 
+
+        SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class,
             () -> jps.setObject(1, new java.sql.Date(randomMillisSinceEpoch()), Types.DOUBLE));
         assertEquals("Conversion from type " + someSqlDate.getClass().getName() + " to DOUBLE not supported", sqle.getMessage());
     }
-    
+
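[Editor's aside: a sketch of the JDK behavior the Calendar tests below lean on, not project code.
Calendar#getTime() exposes the calendar's instant as a java.util.Date, and Date equality compares
instants while ignoring time zones:]

    Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("GMT"), Locale.ROOT);
    cal.setTimeInMillis(0L);
    assert cal.getTime().equals(new Date(0L)); // holds regardless of the default zone
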
     public void testSettingCalendarValues() throws SQLException {
         JdbcPreparedStatement jps = createJdbcPreparedStatement();
         Calendar someCalendar = randomCalendar();
         someCalendar.setTimeInMillis(randomMillisSinceEpoch());
-        
+
         jps.setObject(1, someCalendar);
         assertEquals(someCalendar.getTime(), (Date) value(jps));
         assertEquals(TIMESTAMP, jdbcType(jps));
         assertTrue(value(jps) instanceof java.util.Date);
-        
+
         jps.setObject(1, someCalendar, Types.VARCHAR);
         assertEquals(someCalendar.toString(), value(jps).toString());
         assertEquals(VARCHAR, jdbcType(jps));
-        
+
         Calendar nonDefaultCal = randomCalendar();
         jps.setObject(1, nonDefaultCal);
         assertEquals(nonDefaultCal.getTime(), (Date) value(jps));
         assertEquals(TIMESTAMP, jdbcType(jps));
     }
-    
+
     public void testThrownExceptionsWhenSettingCalendarValues() throws SQLException {
         JdbcPreparedStatement jps = createJdbcPreparedStatement();
         Calendar someCalendar = randomCalendar();
-        
+
         SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, someCalendar, Types.DOUBLE));
         assertEquals("Conversion from type " + someCalendar.getClass().getName() + " to DOUBLE not supported", sqle.getMessage());
     }
-    
+
     public void testSettingDateValues() throws SQLException {
         JdbcPreparedStatement jps = createJdbcPreparedStatement();
         Date someDate = new Date(randomMillisSinceEpoch());
-        
+
         jps.setObject(1, someDate);
         assertEquals(someDate, (Date) value(jps));
         assertEquals(TIMESTAMP, jdbcType(jps));
         assertTrue(value(jps) instanceof java.util.Date);
-        
+
         jps.setObject(1, someDate, Types.VARCHAR);
         assertEquals(someDate.toString(), value(jps).toString());
         assertEquals(VARCHAR, jdbcType(jps));
     }
-    
+
     public void testThrownExceptionsWhenSettingDateValues() throws SQLException {
         JdbcPreparedStatement jps = createJdbcPreparedStatement();
         Date someDate = new Date(randomMillisSinceEpoch());
-        
+
         SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, someDate, Types.BIGINT));
         assertEquals("Conversion from type " + someDate.getClass().getName() + " to BIGINT not supported", sqle.getMessage());
     }
-    
+
     public void testSettingLocalDateTimeValues() throws SQLException {
         JdbcPreparedStatement jps = createJdbcPreparedStatement();
         LocalDateTime ldt = LocalDateTime.now(Clock.systemDefaultZone());
-        
+
         jps.setObject(1, ldt);
         assertEquals(Date.class, value(jps).getClass());
         assertEquals(TIMESTAMP, jdbcType(jps));
         assertTrue(value(jps) instanceof java.util.Date);
-        
+
         jps.setObject(1, ldt, Types.VARCHAR);
         assertEquals(ldt.toString(), value(jps).toString());
         assertEquals(VARCHAR, jdbcType(jps));
     }
-    
+
     public void testThrownExceptionsWhenSettingLocalDateTimeValues() throws SQLException {
         JdbcPreparedStatement jps = createJdbcPreparedStatement();
         LocalDateTime ldt = LocalDateTime.now(Clock.systemDefaultZone());
-        
+
         SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, ldt, Types.BIGINT));
         assertEquals("Conversion from type " + ldt.getClass().getName() + " to BIGINT not supported", sqle.getMessage());
     }
-    
+
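[Editor's aside: sketch only; this is the standard java.time bridge, not necessarily the driver's
exact implementation. A zone-less LocalDateTime can only surface as the java.util.Date the
assertions above expect after being pinned to some zone first:]

    LocalDateTime ldt = LocalDateTime.now(Clock.systemDefaultZone());
    Date asDate = Date.from(ldt.atZone(ZoneId.systemDefault()).toInstant());
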
     public void testSettingByteArrayValues() throws SQLException {
         JdbcPreparedStatement jps = createJdbcPreparedStatement();
-        
+
         byte[] buffer = "some data".getBytes(StandardCharsets.UTF_8);
         jps.setBytes(1, buffer);
         assertEquals(byte[].class, value(jps).getClass());
         assertEquals(VARBINARY, jdbcType(jps));
-        
+
         jps.setObject(1, buffer);
         assertEquals(byte[].class, value(jps).getClass());
         assertEquals(VARBINARY, jdbcType(jps));
         assertTrue(value(jps) instanceof byte[]);
-        
+
         jps.setObject(1, buffer, Types.VARBINARY);
         assertEquals((byte[]) value(jps), buffer);
         assertEquals(VARBINARY, jdbcType(jps));
-        
+
         SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, buffer, Types.VARCHAR));
         assertEquals("Conversion from type byte[] to VARCHAR not supported", sqle.getMessage());
-        
+
         sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, buffer, Types.DOUBLE));
         assertEquals("Conversion from type byte[] to DOUBLE not supported", sqle.getMessage());
     }
-    
+
     public void testThrownExceptionsWhenSettingByteArrayValues() throws SQLException {
         JdbcPreparedStatement jps = createJdbcPreparedStatement();
         byte[] buffer = "foo".getBytes(StandardCharsets.UTF_8);
-        
+
         SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, buffer, Types.VARCHAR));
         assertEquals("Conversion from type byte[] to VARCHAR not supported", sqle.getMessage());
-        
+
         sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, buffer, Types.DOUBLE));
         assertEquals("Conversion from type byte[] to DOUBLE not supported", sqle.getMessage());
     }
@@ -564,14 +564,14 @@ private JDBCType jdbcType(JdbcPreparedStatement jps) throws SQLException {
     private Object value(JdbcPreparedStatement jps) throws SQLException {
         return jps.query.getParam(1).value;
     }
-    
+
     private Calendar randomCalendar() {
         return Calendar.getInstance(randomTimeZone(), Locale.ROOT);
     }
-    
+
     /*
      * Converts from UTC to the provided Calendar.
-     * Helps checking if the converted date/time values using Calendars in set*(...,Calendar) methods did convert 
+     * Helps checking if the converted date/time values using Calendars in set*(...,Calendar) methods did convert
      * the values correctly to UTC.
      */
     private long convertFromUTCtoCalendar(Date date, Calendar nonDefaultCal) throws SQLException {
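[Editor's aside: context for the next file, sketch only. Comparator.comparing builds a key-extractor
comparator equivalent to the anonymous class it replaces; both order crons by their expression
string:]

    Comparator<Cron> byExpression = Comparator.comparing(Cron::expression);
    // same ordering as: (c1, c2) -> c1.expression().compareTo(c2.expression())
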
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronnableSchedule.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronnableSchedule.java
index ec309c69476cc..695c9b192eaa6 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronnableSchedule.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronnableSchedule.java
@@ -13,20 +13,15 @@
 
 public abstract class CronnableSchedule implements Schedule {
 
-    private static final Comparator<Cron> CRON_COMPARATOR = new Comparator<Cron>() {
-        @Override
-        public int compare(Cron c1, Cron c2) {
-            return c1.expression().compareTo(c2.expression());
-        }
-    };
+    private static final Comparator<Cron> CRON_COMPARATOR = Comparator.comparing(Cron::expression);
 
     protected final Cron[] crons;
 
-    public CronnableSchedule(String... expressions) {
+    CronnableSchedule(String... expressions) {
         this(crons(expressions));
     }
 
-    public CronnableSchedule(Cron... crons) {
+    private CronnableSchedule(Cron... crons) {
         assert crons.length > 0;
         this.crons = crons;
         Arrays.sort(crons, CRON_COMPARATOR);
@@ -37,7 +32,15 @@ public long nextScheduledTimeAfter(long startTime, long time) {
         assert time >= startTime;
         long nextTime = Long.MAX_VALUE;
         for (Cron cron : crons) {
-            nextTime = Math.min(nextTime, cron.getNextValidTimeAfter(time));
+            long nextValidTimeAfter = cron.getNextValidTimeAfter(time);
+
+            boolean previousCronExpired = nextTime == -1;
+            boolean currentCronValid = nextValidTimeAfter > -1;
+            if (previousCronExpired && currentCronValid) {
+                nextTime = nextValidTimeAfter;
+            } else {
+                nextTime = Math.min(nextTime, nextValidTimeAfter);
+            }
         }
         return nextTime;
     }
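[Editor's aside: why the hunk above drops the bare Math.min, sketch only. getNextValidTimeAfter
returns -1 once a cron expression has no future fire times, and min() would let that sentinel mask
a real fire time contributed by another cron in the schedule:]

    long expired = -1L;                        // cron with no remaining fire times
    long upcoming = 1_546_300_800_000L;        // a valid future fire time
    assert Math.min(expired, upcoming) == -1L; // old behavior: whole schedule looked expired
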
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/CompareConditionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/CompareConditionTests.java
index 7331be4553bbe..7c3abd1d808e9 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/CompareConditionTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/CompareConditionTests.java
@@ -33,7 +33,7 @@ public void testOpEvalEQ() throws Exception {
         assertThat(CompareCondition.Op.EQ.eval(null, null), is(true));
         assertThat(CompareCondition.Op.EQ.eval(4, 3.0), is(false));
         assertThat(CompareCondition.Op.EQ.eval(3, 3.0), is(true));
-        assertThat(CompareCondition.Op.EQ.eval(2, new Float(3.0)), is(false));
+        assertThat(CompareCondition.Op.EQ.eval(2, Float.valueOf((float) 3.0)), is(false));
         assertThat(CompareCondition.Op.EQ.eval(3, null), is(false));
         assertThat(CompareCondition.Op.EQ.eval(2, "2"), is(true)); // comparing as strings
         assertThat(CompareCondition.Op.EQ.eval(3, "4"), is(false)); // comparing as strings
@@ -59,7 +59,7 @@ public void testOpEvalNotEQ() throws Exception {
         assertThat(CompareCondition.Op.NOT_EQ.eval(null, null), is(false));
         assertThat(CompareCondition.Op.NOT_EQ.eval(4, 3.0), is(true));
         assertThat(CompareCondition.Op.NOT_EQ.eval(3, 3.0), is(false));
-        assertThat(CompareCondition.Op.NOT_EQ.eval(2, new Float(3.0)), is(true));
+        assertThat(CompareCondition.Op.NOT_EQ.eval(2, Float.valueOf((float) 3.0)), is(true));
         assertThat(CompareCondition.Op.NOT_EQ.eval(3, null), is(true));
         assertThat(CompareCondition.Op.NOT_EQ.eval(2, "2"), is(false)); // comparing as strings
         assertThat(CompareCondition.Op.NOT_EQ.eval(3, "4"), is(true)); // comparing as strings
@@ -83,7 +83,7 @@ public void testOpEvalNotEQ() throws Exception {
     public void testOpEvalGTE() throws Exception {
         assertThat(CompareCondition.Op.GTE.eval(4, 3.0), is(true));
         assertThat(CompareCondition.Op.GTE.eval(3, 3.0), is(true));
-        assertThat(CompareCondition.Op.GTE.eval(2, new Float(3.0)), is(false));
+        assertThat(CompareCondition.Op.GTE.eval(2, Float.valueOf((float) 3.0)), is(false));
         assertThat(CompareCondition.Op.GTE.eval(3, null), is(false));
         assertThat(CompareCondition.Op.GTE.eval(3, "2"), is(true)); // comparing as strings
         assertThat(CompareCondition.Op.GTE.eval(3, "4"), is(false)); // comparing as strings
@@ -103,7 +103,7 @@ public void testOpEvalGTE() throws Exception {
     public void testOpEvalGT() throws Exception {
         assertThat(CompareCondition.Op.GT.eval(4, 3.0), is(true));
         assertThat(CompareCondition.Op.GT.eval(3, 3.0), is(false));
-        assertThat(CompareCondition.Op.GT.eval(2, new Float(3.0)), is(false));
+        assertThat(CompareCondition.Op.GT.eval(2, Float.valueOf((float) 3.0)), is(false));
         assertThat(CompareCondition.Op.GT.eval(3, null), is(false));
         assertThat(CompareCondition.Op.GT.eval(3, "2"), is(true)); // comparing as strings
         assertThat(CompareCondition.Op.GT.eval(3, "4"), is(false)); // comparing as strings
@@ -124,7 +124,7 @@ public void testOpEvalGT() throws Exception {
     public void testOpEvalLTE() throws Exception {
         assertThat(CompareCondition.Op.LTE.eval(4, 3.0), is(false));
         assertThat(CompareCondition.Op.LTE.eval(3, 3.0), is(true));
-        assertThat(CompareCondition.Op.LTE.eval(2, new Float(3.0)), is(true));
+        assertThat(CompareCondition.Op.LTE.eval(2, Float.valueOf((float) 3.0)), is(true));
         assertThat(CompareCondition.Op.LTE.eval(3, null), is(false));
         assertThat(CompareCondition.Op.LTE.eval(3, "2"), is(false)); // comparing as strings
         assertThat(CompareCondition.Op.LTE.eval(3, "4"), is(true)); // comparing as strings
@@ -144,7 +144,7 @@ public void testOpEvalLTE() throws Exception {
     public void testOpEvalLT() throws Exception {
         assertThat(CompareCondition.Op.LT.eval(4, 3.0), is(false));
         assertThat(CompareCondition.Op.LT.eval(3, 3.0), is(false));
-        assertThat(CompareCondition.Op.LT.eval(2, new Float(3.0)), is(true));
+        assertThat(CompareCondition.Op.LT.eval(2, Float.valueOf((float) 3.0)), is(true));
         assertThat(CompareCondition.Op.LT.eval(3, null), is(false));
         assertThat(CompareCondition.Op.LT.eval(3, "2"), is(false)); // comparing as strings
         assertThat(CompareCondition.Op.LT.eval(3, "4"), is(true)); // comparing as strings
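[Editor's aside: on the replacements above, sketch only. The boxed-primitive constructors are
deprecated since Java 9, hence new Float(3.0) becomes Float.valueOf((float) 3.0); the explicit
(float) cast keeps the double-to-float narrowing that new Float(double) performed implicitly:]

    Float boxed = Float.valueOf((float) 3.0); // preferred; valueOf may reuse cached instances
    // Float boxed = new Float(3.0);          // deprecated boxing constructor
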
2017"); + ZonedDateTime start2019 = ZonedDateTime.of(2019, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); + ZonedDateTime start2020 = ZonedDateTime.of(2020, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); + long firstSchedule = schedule.nextScheduledTimeAfter(0, start2019.toInstant().toEpochMilli()); + long secondSchedule = schedule.nextScheduledTimeAfter(0, start2020.toInstant().toEpochMilli()); + + assertThat(firstSchedule, is(not(-1L))); + assertThat(secondSchedule, is(not(-1L))); + assertThat(firstSchedule, is(not(secondSchedule))); + } + public void testParseInvalidBadExpression() throws Exception { XContentBuilder builder = jsonBuilder().value("0 0/5 * * ?"); BytesReference bytes = BytesReference.bytes(builder); XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); parser.nextToken(); - try { - new CronSchedule.Parser().parse(parser); - fail("expected cron parsing to fail when using invalid cron expression"); - } catch (ElasticsearchParseException pe) { - // expected - assertThat(pe.getCause(), instanceOf(IllegalArgumentException.class)); - } + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> new CronSchedule.Parser().parse(parser)); + assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); } public void testParseInvalidEmpty() throws Exception {