diff --git a/Vagrantfile b/Vagrantfile
index 6f81ba0273c9f..6761fec07dab2 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -337,6 +337,7 @@ export BATS=/project/build/bats
 export BATS_UTILS=/project/build/packaging/bats/utils
 export BATS_TESTS=/project/build/packaging/bats/tests
 export PACKAGING_ARCHIVES=/project/build/packaging/archives
+export PACKAGING_TESTS=/project/build/packaging/tests
 VARS
 cat \<\<SUDOERS_VARS > /etc/sudoers.d/elasticsearch_vars
 Defaults env_keep += "ZIP"
@@ -347,6 +348,7 @@ Defaults env_keep += "BATS"
 Defaults env_keep += "BATS_UTILS"
 Defaults env_keep += "BATS_TESTS"
 Defaults env_keep += "PACKAGING_ARCHIVES"
+Defaults env_keep += "PACKAGING_TESTS"
 SUDOERS_VARS
 chmod 0440 /etc/sudoers.d/elasticsearch_vars
 SHELL
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
index a44b9c849d333..4e512b3cdd418 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
@@ -549,6 +549,22 @@ class BuildPlugin implements Plugin<Project> {
             javadoc.classpath = javadoc.getClasspath().filter { f ->
                 return classes.contains(f) == false
             }
+            /*
+             * Force html5 on projects that support it to silence the warning
+             * that `javadoc` will change its defaults in the future.
+             *
+             * But not all of our javadoc is actually valid html5. So we
+             * have to become valid incrementally. We only set html5 on the
+             * projects we have converted so that we still get the annoying
+             * warning on the unconverted ones. That will give us an
+             * incentive to convert them....
+             */
+            List<String> html4Projects = [
+                ':server',
+            ]
+            if (false == html4Projects.contains(project.path)) {
+                javadoc.options.addBooleanOption('html5', true)
+            }
         }
         configureJavadocJar(project)
     }
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantPropertiesExtension.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantPropertiesExtension.groovy
index 264a1e0f8ac17..e9b664a5a31b7 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantPropertiesExtension.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantPropertiesExtension.groovy
@@ -41,6 +41,9 @@ class VagrantPropertiesExtension {
     @Input
     Boolean inheritTestUtils
 
+    @Input
+    String testClass
+
     VagrantPropertiesExtension(List<String> availableBoxes) {
         this.boxes = availableBoxes
         this.batsDir = 'src/test/resources/packaging'
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy
index 7a0b9f96781df..bb85359ae3f07 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy
@@ -51,6 +51,7 @@ class VagrantTestPlugin implements Plugin<Project> {
     static List<String> UPGRADE_FROM_ARCHIVES = ['rpm', 'deb']
 
     private static final PACKAGING_CONFIGURATION = 'packaging'
+    private static final PACKAGING_TEST_CONFIGURATION = 'packagingTest'
     private static final BATS = 'bats'
     private static final String BATS_TEST_COMMAND ="cd \$PACKAGING_ARCHIVES && sudo bats --tap \$BATS_TESTS/*.$BATS"
     private static final String PLATFORM_TEST_COMMAND ="rm -rf ~/elasticsearch && rsync -r /elasticsearch/ ~/elasticsearch && cd ~/elasticsearch && ./gradlew test integTest"
@@ -66,6 +67,7 @@ class VagrantTestPlugin implements Plugin<Project> {
 
         // Creates custom configurations for Bats testing files (and associated scripts and archives)
         createPackagingConfiguration(project)
+        project.configurations.create(PACKAGING_TEST_CONFIGURATION)
 
         // Creates all the main Vagrant tasks
         createVagrantTasks(project)
@@ -144,10 +146,12 @@ class VagrantTestPlugin implements Plugin<Project> {
     }
 
     private static void createCleanTask(Project project) {
-        project.tasks.create('clean', Delete.class) {
-            description 'Clean the project build directory'
-            group 'Build'
-            delete project.buildDir
+        if (project.tasks.findByName('clean') == null) {
+            project.tasks.create('clean', Delete.class) {
+                description 'Clean the project build directory'
+                group 'Build'
+                delete project.buildDir
+            }
         }
     }
@@ -174,6 +178,18 @@ class VagrantTestPlugin implements Plugin<Project> {
             from project.configurations[PACKAGING_CONFIGURATION]
         }
 
+        File testsDir = new File(packagingDir, 'tests')
+        Copy copyPackagingTests = project.tasks.create('copyPackagingTests', Copy) {
+            into testsDir
+            from project.configurations[PACKAGING_TEST_CONFIGURATION]
+        }
+
+        Task createTestRunnerScript = project.tasks.create('createTestRunnerScript', FileContentsTask) {
+            dependsOn copyPackagingTests
+            file "${testsDir}/run-tests.sh"
+            contents "java -cp \"\$PACKAGING_TESTS/*\" org.junit.runner.JUnitCore ${-> project.extensions.esvagrant.testClass}"
+        }
+
         Task createVersionFile = project.tasks.create('createVersionFile', FileContentsTask) {
             dependsOn copyPackagingArchives
             file "${archivesDir}/version"
@@ -234,7 +250,8 @@ class VagrantTestPlugin implements Plugin<Project> {
         Task vagrantSetUpTask = project.tasks.create('setupPackagingTest')
         vagrantSetUpTask.dependsOn 'vagrantCheckVersion'
-        vagrantSetUpTask.dependsOn copyPackagingArchives, createVersionFile, createUpgradeFromFile, createUpgradeIsOssFile
+        vagrantSetUpTask.dependsOn copyPackagingArchives, copyPackagingTests, createTestRunnerScript
+        vagrantSetUpTask.dependsOn createVersionFile, createUpgradeFromFile, createUpgradeIsOssFile
         vagrantSetUpTask.dependsOn copyBatsTests, copyBatsUtils
     }
@@ -393,20 +410,29 @@ class VagrantTestPlugin implements Plugin<Project> {
             packagingTest.dependsOn(batsPackagingTest)
         }
 
-        // This task doesn't do anything yet. In the future it will execute a jar containing tests on the vm
-        Task groovyPackagingTest = project.tasks.create("vagrant${boxTask}#groovyPackagingTest")
-        groovyPackagingTest.dependsOn(up)
-        groovyPackagingTest.finalizedBy(halt)
+        Task javaPackagingTest = project.tasks.create("vagrant${boxTask}#javaPackagingTest", VagrantCommandTask) {
+            command 'ssh'
+            boxName box
+            environmentVars vagrantEnvVars
+            dependsOn up, setupPackagingTest
+            finalizedBy halt
+            args '--command', "bash \"\$PACKAGING_TESTS/run-tests.sh\""
+        }
+
+        // todo remove this onlyIf after all packaging tests are consolidated
+        javaPackagingTest.onlyIf {
+            project.extensions.esvagrant.testClass != null
+        }
 
-        TaskExecutionAdapter groovyPackagingReproListener = createReproListener(project, groovyPackagingTest.path)
-        groovyPackagingTest.doFirst {
-            project.gradle.addListener(groovyPackagingReproListener)
+        TaskExecutionAdapter javaPackagingReproListener = createReproListener(project, javaPackagingTest.path)
+        javaPackagingTest.doFirst {
+            project.gradle.addListener(javaPackagingReproListener)
         }
-        groovyPackagingTest.doLast {
-            project.gradle.removeListener(groovyPackagingReproListener)
+        javaPackagingTest.doLast {
+            project.gradle.removeListener(javaPackagingReproListener)
         }
         if (project.extensions.esvagrant.boxes.contains(box)) {
-            packagingTest.dependsOn(groovyPackagingTest)
+            packagingTest.dependsOn(javaPackagingTest)
         }
 
         Task platform = project.tasks.create("vagrant${boxTask}#platformTest", VagrantCommandTask) {
diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkRequestBuilder.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkRequestBuilder.java
index 1034e722e8789..468152a88df3b 100644
--- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkRequestBuilder.java
+++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/NoopBulkRequestBuilder.java
@@ -129,7 +129,8 @@ public NoopBulkRequestBuilder setWaitForActiveShards(final int waitForActiveShar
     }
 
     /**
-     * A timeout to wait if the index operation can't be performed immediately. Defaults to 1m.
+     * A timeout to wait if the index operation can't be performed immediately.
+     * Defaults to {@code 1m}.
      */
     public final NoopBulkRequestBuilder setTimeout(TimeValue timeout) {
         request.timeout(timeout);
@@ -137,7 +138,8 @@ public final NoopBulkRequestBuilder setTimeout(TimeValue timeout) {
     }
 
     /**
-     * A timeout to wait if the index operation can't be performed immediately. Defaults to 1m.
+     * A timeout to wait if the index operation can't be performed immediately.
+     * Defaults to {@code 1m}.
      */
     public final NoopBulkRequestBuilder setTimeout(String timeout) {
         request.timeout(timeout);
@@ -151,4 +153,3 @@ public int numberOfActions() {
         return request.numberOfActions();
     }
 }
-
diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchRequestBuilder.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchRequestBuilder.java
index 5143bdd870594..9f22d3ec2c0c7 100644
--- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchRequestBuilder.java
+++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/NoopSearchRequestBuilder.java
@@ -142,7 +142,7 @@ public NoopSearchRequestBuilder setRouting(String... routing) {
 
     /**
      * Sets the preference to execute the search. Defaults to randomize across shards. Can be set to
-     * _local to prefer local shards, _primary to execute only on primary shards, or
+     * {@code _local} to prefer local shards, {@code _primary} to execute only on primary shards, or
      * a custom value, which guarantees that the same order will be used across different requests.
      */
     public NoopSearchRequestBuilder setPreference(String preference) {
@@ -188,7 +188,7 @@ public NoopSearchRequestBuilder setMinScore(float minScore) {
     }
 
     /**
-     * From index to start the search from. Defaults to 0.
+     * From index to start the search from. Defaults to {@code 0}.
      */
     public NoopSearchRequestBuilder setFrom(int from) {
         sourceBuilder().from(from);
@@ -196,7 +196,7 @@ public NoopSearchRequestBuilder setFrom(int from) {
     }
 
     /**
-     * The number of search hits to return. Defaults to 10.
+     * The number of search hits to return. Defaults to {@code 10}.
      */
     public NoopSearchRequestBuilder setSize(int size) {
         sourceBuilder().size(size);
@@ -349,7 +349,7 @@ public NoopSearchRequestBuilder slice(SliceBuilder builder) {
 
     /**
      * Applies when sorting, and controls if scores will be tracked as well. Defaults to
-     * false.
+     * {@code false}.
      */
     public NoopSearchRequestBuilder setTrackScores(boolean trackScores) {
         sourceBuilder().trackScores(trackScores);
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java
index 2a029e797d158..380c9fabdbef4 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java
@@ -842,13 +842,6 @@ Params withLocal(boolean local) {
             return this;
         }
 
-        Params withFlatSettings(boolean flatSettings) {
-            if (flatSettings) {
-                return putParam("flat_settings", Boolean.TRUE.toString());
-            }
-            return this;
-        }
-
         Params withIncludeDefaults(boolean includeDefaults) {
             if (includeDefaults) {
                 return putParam("include_defaults", Boolean.TRUE.toString());
diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle
index 03e4a082d274c..e226656dbd226 100644
--- a/client/sniffer/build.gradle
+++ b/client/sniffer/build.gradle
@@ -20,7 +20,6 @@ import org.elasticsearch.gradle.precommit.PrecommitTasks
 
 apply plugin: 'elasticsearch.build'
-apply plugin: 'ru.vyarus.animalsniffer'
 apply plugin: 'nebula.maven-base-publish'
 apply plugin: 'nebula.maven-scm'
 
@@ -52,8 +51,6 @@ dependencies {
   testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
   testCompile "org.elasticsearch:securemock:${versions.securemock}"
   testCompile "org.elasticsearch:mocksocket:${versions.mocksocket}"
-  testCompile "org.codehaus.mojo:animal-sniffer-annotations:1.15"
-  signature "org.codehaus.mojo.signature:java17:1.0@signature"
 }
 
 forbiddenApisMain {
diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java
index 483b7df62f95a..f13d175110434 100644
--- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java
+++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java
@@ -60,8 +60,6 @@ import static org.junit.Assert.assertThat;
 import static org.junit.Assert.fail;
 
-//animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes
-@IgnoreJRERequirement
 public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
 
     private int sniffRequestTimeout;
diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle
index f2fc297a9e4c8..9fa06021236a2 100644
--- a/distribution/archives/build.gradle
+++ b/distribution/archives/build.gradle
@@ -217,6 +217,24 @@ subprojects {
   }
   check.dependsOn checkNotice
 
+  if (project.name == 'zip' || project.name == 'tar') {
+    task checkMlCppNotice {
+      dependsOn buildDist, checkExtraction
+      onlyIf toolExists
+      doLast {
+        // this is just a small sample from the C++ notices, the idea being that if we've added these lines we've probably added all the required lines
+        final List<String> expectedLines = Arrays.asList("Apache log4cxx", "Boost Software License - Version 1.0 - August 17th, 2003")
+        final Path noticePath = archiveExtractionDir.toPath().resolve("elasticsearch-${VersionProperties.elasticsearch}/modules/x-pack/x-pack-ml/NOTICE.txt")
+        final List<String> actualLines = Files.readAllLines(noticePath)
+        for (final String expectedLine : expectedLines) {
+          if (actualLines.contains(expectedLine) == false) {
+            throw new GradleException("expected [${noticePath}] to contain [${expectedLine}] but it did not")
+          }
+        }
+      }
+    }
+    check.dependsOn checkMlCppNotice
+  }
 }
 
 /*****************************************************************************
diff --git a/docs/CHANGELOG.asciidoc b/docs/CHANGELOG.asciidoc
index 02603540b4f91..30018fcf42a29 100644
--- a/docs/CHANGELOG.asciidoc
+++ b/docs/CHANGELOG.asciidoc
@@ -1,20 +1,4877 @@
+[[es-release-notes]]
+= {es} Release Notes
+
+[partintro]
+--
 // Use these for links to issue and pulls. Note issues and pulls redirect one to
 // each other on Github, so don't worry too much on using the right prefix.
 // :issue: https://github.com/elastic/elasticsearch/issues/
 // :pull: https://github.com/elastic/elasticsearch/pull/
 
-= Elasticsearch Release Notes
+This section summarizes the changes in each release.
+
+* <<release-notes-6.4.0>>
+
+
+--
+
+[[release-notes-6.4.0]]
+== {es} version 6.4.0
+
+//[float]
+//[[breaking-6.4.0]]
+//=== Breaking Changes
+
+//[float]
+//=== Breaking Java Changes
+
+//[float]
+//=== Deprecations
+
+//[float]
+//=== New Features
+
+[float]
+=== Enhancements
+
+{ref-64}/breaking_64_api_changes.html#copy-source-settings-on-resize[Allow copying source settings on index resize operations] ({pull}30255[#30255])
+
+[float]
+=== Bug Fixes
+
+Do not ignore request analysis/similarity settings on index resize operations when the source index already contains such settings ({pull}30216[#30216])
+
+
+//[float]
+//=== Regressions
+
+//[float]
+//=== Known Issues
+
+[[release-notes-6.3.0]]
+== {es} version 6.3.0
+
+//[float]
+//[[breaking-6.3.0]]
+//=== Breaking Changes
+
+//[float]
+//=== Breaking Java Changes
+
+//[float]
+//=== Deprecations
+
+//[float]
+//=== New Features
+
+//[float]
+//=== Enhancements
+
+//[float]
+//=== Bug Fixes
+
+//[float]
+//=== Regressions
+
+//[float]
+//=== Known Issues
+
+[[release-notes-6.2.4]]
+== {es} version 6.2.4
+
+//[float]
+//[[breaking-6.2.4]]
+//=== Breaking Changes
+
+//[float]
+//=== Breaking Java Changes
+
+//[float]
+//=== Deprecations
+
+//[float]
+//=== New Features
+
+//[float]
+//=== Enhancements
+
+[float]
+=== Bug Fixes
+
+Engine::
+* Harden periodically check to avoid endless flush loop {pull}29125[#29125] (issues: {issue}28350[#28350], {issue}29097[#29097])
+
+Ingest::
+* Don't allow referencing the pattern bank name in the pattern bank {pull}29295[#29295] (issue: {issue}29257[#29257])
+
+Java High Level REST Client::
+* Bulk processor#awaitClose to close scheduler {pull}29263[#29263]
+
+Java Low Level REST Client::
+* REST client: hosts marked dead for the first time should not be immediately retried {pull}29230[#29230]
+
+Network::
+* Cross-cluster search and default connections can get crossed [OPEN] [ISSUE] {pull}29321[#29321]
+
+Percolator::
+* Fixed bug when non percolator docs end up in the search hits {pull}29447[#29447] (issue: {issue}29429[#29429])
+* Fixed a msm accounting error that can occur during analyzing a percolator query {pull}29415[#29415] (issue: {issue}29393[#29393])
+* Fix more query extraction bugs. {pull}29388[#29388] (issues: {issue}28353[#28353], {issue}29376[#29376])
+* Fix some query extraction bugs. {pull}29283[#29283]
+
+Plugins::
+* Plugins: Fix native controller confirmation for non-meta plugin {pull}29434[#29434]
+
+Search::
+* Propagate ignore_unmapped to inner_hits {pull}29261[#29261] (issue: {issue}29071[#29071])
+
+Settings::
+* Archive unknown or invalid settings on updates {pull}28888[#28888] (issue: {issue}28609[#28609])
+
+//[float]
+//=== Regressions
+
+//[float]
+//=== Known Issues
+
+[[release-notes-6.2.3]]
+== {es} version 6.2.3
+
+//[float]
+//[[breaking-6.2.3]]
+//=== Breaking Changes
+
+//[float]
+//=== Breaking Java Changes
+
+//[float]
+//=== Deprecations
+
+//[float]
+//=== New Features
+
+[float]
+=== Enhancements
+
+Highlighting::
+* Limit analyzed text for highlighting (improvements) {pull}28808[#28808] (issues: {issue}16764[#16764], {issue}27934[#27934])
+
+Recovery::
+* Require translogUUID when reading global checkpoint {pull}28587[#28587] (issue: {issue}28435[#28435])
+
+[float]
+=== Bug Fixes
+
+Core::
+* Remove special handling for _all in nodes info {pull}28971[#28971] (issue: {issue}28797[#28797])
+
+Engine::
+* Avoid class cast exception from index writer {pull}28989[#28989]
+* Maybe die before failing engine {pull}28973[#28973] (issues: {issue}27265[#27265], {issue}28967[#28967])
+* Never block on key in `LiveVersionMap#pruneTombstones` {pull}28736[#28736] (issue: {issue}28714[#28714])
+
+Ingest::
+* Continue registering pipelines after one pipeline parse failure. {pull}28752[#28752] (issue: {issue}28269[#28269])
+
+Java High Level REST Client::
+* REST high-level client: encode path parts {pull}28663[#28663] (issue: {issue}28625[#28625])
+
+Packaging::
+* Delay path expansion on Windows {pull}28753[#28753] (issues: {issue}27675[#27675], {issue}28748[#28748])
+
+Percolator::
+* Fix percolator query analysis for function_score query {pull}28854[#28854]
+* Improved percolator's random candidate query duel test {pull}28840[#28840]
+
+Snapshot/Restore::
+* Fix NPE when using deprecated Azure settings {pull}28769[#28769] (issues: {issue}23518[#23518], {issue}28299[#28299])
 
-== Elasticsearch version 6.3.0
+Stats::
+* Fix AdaptiveSelectionStats serialization bug {pull}28718[#28718] (issue: {issue}28713[#28713])
+
+//[float]
+//=== Regressions
+
+//[float]
+//=== Known Issues
+
+[[release-notes-6.2.2]]
+== {es} version 6.2.2
+
+//[float]
+//[[breaking-6.2.2]]
+//=== Breaking Changes
+
+//[float]
+//=== Breaking Java Changes
+
+//[float]
+//=== Deprecations
+
+//[float]
+//=== New Features
+
+[float]
+=== Enhancements
+
+Recovery::
+* Synced-flush should not seal index of out of sync replicas {pull}28464[#28464] (issue: {issue}10032[#10032])
+
+[float]
+=== Bug Fixes
+
+Core::
+* Handle throws on tasks submitted to thread pools {pull}28667[#28667]
+* Fix size blocking queue to not lie about its weight {pull}28557[#28557] (issue: {issue}28547[#28547])
+
+Ingest::
+* Guard accessDeclaredMembers for Tika on JDK 10 {pull}28603[#28603] (issue: {issue}28602[#28602])
+* Fix for bug that prevents pipelines to load that use stored scripts after a restart {pull}28588[#28588]
+
+Java High Level REST Client::
+* Fix parsing of script fields {pull}28395[#28395] (issue: {issue}28380[#28380])
+* Move to POST when calling API to retrieve which support request body {pull}28342[#28342] (issue: {issue}28326[#28326])
+
+Packaging::
+* Fix using relative custom config path {pull}28700[#28700] (issue: {issue}27610[#27610])
+* Disable console logging in the Windows service {pull}28618[#28618] (issue: {issue}20422[#20422])
+
+Percolator::
+* Do not take duplicate query extractions into account for minimum_should_match attribute {pull}28353[#28353] (issue: {issue}28315[#28315])
+
+Recovery::
+* Fsync directory after cleanup {pull}28604[#28604] (issue: {issue}28435[#28435])
+
+//[float]
+//=== Regressions
+
+//[float]
+//=== Known Issues
+
+[[release-notes-6.2.1]]
+== {es} version 6.2.1
+
+//[float]
+//[[breaking-6.2.1]]
+//=== Breaking Changes
+
+//[float]
+//=== Breaking Java Changes
+
+//[float]
+//=== Deprecations
+
+//[float]
+//=== New Features
+
+//[float]
+//=== Enhancements
+
+[float]
+=== Bug Fixes
+
+Plugin Lang Painless::
+* Painless: Fix For Loop NullPointerException {pull}28506[#28506] (issue: {issue}28501[#28501])
+
+Plugins::
+* Fix the ability to remove old plugin {pull}28540[#28540] (issue: {issue}28538[#28538])
+
+//[float]
+//=== Regressions
+
+//[float]
+//=== Known Issues
+
+[[release-notes-6.2.0]]
+== {es} version 6.2.0
+
+[float]
+[[breaking-6.2.0]]
+=== Breaking Changes
+
+Aggregations::
+* Add a new cluster setting to limit the total number of buckets returned by a request {pull}27581[#27581] (issues: {issue}26012[#26012], {issue}27452[#27452])
+
+Core::
+* Forbid granting the all permission in production {pull}27548[#27548]
+
+Highlighting::
+* Limit the analyzed text for highlighting {pull}27934[#27934] (issue: {issue}27517[#27517])
+
+Rollover::
+* Fail rollover if duplicated alias found in templates {pull}28110[#28110] (issue: {issue}26976[#26976])
+
+Search::
+* Introduce limit to the number of terms in Terms Query {pull}27968[#27968] (issue: {issue}18829[#18829])
+
+[float]
+=== Breaking Java Changes
+
+Java API::
+* Remove `operationThreaded` from Java API {pull}27836[#27836]
+
+Java High Level REST Client::
+* REST high-level client: remove index suffix from indices client method names {pull}28263[#28263]
+
+[float]
+=== Deprecations
+
+Analysis::
+* Backport delimited payload filter renaming {pull}27535[#27535] (issue: {issue}26625[#26625])
+
+Suggesters::
+* deprecating `jarowinkler` in favor of `jaro_winkler` {pull}27526[#27526]
+* Deprecating `levenstein` in favor of `levenshtein` {pull}27409[#27409] (issue: {issue}27325[#27325])
+
+[float]
 === New Features
 
+Plugin Ingest GeoIp::
+* Enable ASN support for Ingest GeoIP plugin. {pull}27958[#27958] (issue: {issue}27849[#27849])
+
+Plugin Lang Painless::
+* Painless: Add spi jar that will be published for extending whitelists {pull}28302[#28302]
+* Painless: Add a simple cache for whitelist methods and fields. {pull}28142[#28142]
+
+Plugins::
+* Add the ability to bundle multiple plugins into a meta plugin {pull}28022[#28022] (issue: {issue}27316[#27316])
+
+Rank Evaluation::
+* Backport of ranking evaluation API (#27478) {pull}27844[#27844] (issue: {issue}27478[#27478])
+
+Recovery::
+* Backport for using lastSyncedGlobalCheckpoint in deletion policy {pull}27866[#27866] (issue: {issue}27826[#27826])
+
+Reindex API::
+* Add scroll parameter to _reindex API {pull}28041[#28041] (issue: {issue}27555[#27555])
+
+[float]
+=== Enhancements
+
+Allocation::
+* Fix cluster.routing.allocation.enable and cluster.routing.rebalance.enable case {pull}28037[#28037] (issue: {issue}28007[#28007])
+* Add node id to shard failure message {pull}28024[#28024] (issue: {issue}28018[#28018])
+
+Analysis::
+* Limit the analyzed text for highlighting (#27934) {pull}28176[#28176] (issue: {issue}27517[#27517])
+* Allow TrimFilter to be used in custom normalizers {pull}27758[#27758] (issue: {issue}27310[#27310])
+
+Circuit Breakers::
+* Add accounting circuit breaker and track segment memory usage {pull}27116[#27116] (issue: {issue}27044[#27044])
+
+Cluster::
+* Adds wait_for_no_initializing_shards to cluster health API {pull}27489[#27489] (issue: {issue}25623[#25623])
+
+Core::
+* Introduce elasticsearch-core jar {pull}28191[#28191] (issue: {issue}27933[#27933])
+* Rename core module to server {pull}28190[#28190] (issue: {issue}27933[#27933])
+* Rename core module to server {pull}28180[#28180] (issue: {issue}27933[#27933])
+* Introduce elasticsearch-core jar {pull}28178[#28178] (issue: {issue}27933[#27933])
+* Add Writeable.Reader support to TransportResponseHandler {pull}28010[#28010] (issue: {issue}26315[#26315])
+* Simplify rejected execution exception {pull}27664[#27664] (issue: {issue}27663[#27663])
+* Add node name to thread pool executor name {pull}27663[#27663] (issues: {issue}26007[#26007], {issue}26835[#26835])
+
+Discovery::
+* Add information when master node left to DiscoveryNodes' shortSummary() {pull}28197[#28197] (issue: {issue}28169[#28169])
+
+Engine::
+* Move uid lock into LiveVersionMap {pull}27905[#27905]
+* Optimize version map for append-only indexing {pull}27752[#27752]
+
+Geo::
+* [GEO] Add WKT Support to GeoBoundingBoxQueryBuilder {pull}27692[#27692] (issues: {issue}27690[#27690], {issue}9120[#9120])
+* [Geo] Add Well Known Text (WKT) Parsing Support to ShapeBuilders {pull}27417[#27417] (issue: {issue}9120[#9120])
+
+Highlighting::
+* Include all sentences smaller than fragment_size in the unified highlighter {pull}28132[#28132] (issue: {issue}28089[#28089])
+
+Ingest::
+* Enable convert processor to support Long and Double {pull}27891[#27891] (issues: {issue}23085[#23085], {issue}23423[#23423])
+
+Internal::
+* Make KeyedLock reentrant {pull}27920[#27920]
+* Make AbstractQueryBuilder.declareStandardFields to be protected (#27865) {pull}27894[#27894] (issue: {issue}27865[#27865])
+* Tighten the CountedBitSet class {pull}27632[#27632]
+* Avoid doing redundant work when checking for self references. {pull}26927[#26927] (issue: {issue}26907[#26907])
+
+Java API::
+* Add missing delegate methods to NodeIndicesStats {pull}28092[#28092]
+* Java api clean-up : consistency for `shards_acknowledged` getters {pull}27819[#27819] (issue: {issue}27784[#27784])
+
+Java High Level REST Client::
+* add toString implementation for UpdateRequest. {pull}27997[#27997] (issue: {issue}27986[#27986])
+* Add Close Index API to the high level REST client {pull}27734[#27734] (issue: {issue}27205[#27205])
+* Add Open Index API to the high level REST client {pull}27574[#27574] (issue: {issue}27205[#27205])
+* Added Create Index support to high-level REST client {pull}27351[#27351] (issue: {issue}27205[#27205])
+* Add multi get api to the high level rest client {pull}27337[#27337] (issue: {issue}27205[#27205])
+* Add msearch api to high level client {pull}27274[#27274]
+
+Mapping::
+* Allow `_doc` as a type. {pull}27816[#27816] (issues: {issue}27750[#27750], {issue}27751[#27751])
+
+Network::
+* Add NioGroup for use in different transports {pull}27737[#27737] (issue: {issue}27260[#27260])
+* Add read timeouts to http module {pull}27713[#27713]
+* Implement byte array reusage in `NioTransport` {pull}27696[#27696] (issue: {issue}27563[#27563])
+* Introduce resizable inbound byte buffer {pull}27551[#27551] (issue: {issue}27563[#27563])
+* Decouple nio constructs from the tcp transport {pull}27484[#27484] (issue: {issue}27260[#27260])
+
+Packaging::
+* Extend JVM options to support multiple versions {pull}27675[#27675] (issue: {issue}27646[#27646])
+* Add explicit coreutils dependency {pull}27660[#27660] (issue: {issue}27609[#27609])
+* Detect mktemp from coreutils {pull}27659[#27659] (issues: {issue}27609[#27609], {issue}27643[#27643])
+* Enable GC logs by default {pull}27610[#27610]
+* Use private directory for temporary files {pull}27609[#27609] (issues: {issue}14372[#14372], {issue}27144[#27144])
+
+Percolator::
+* also extract match_all queries when indexing percolator queries {pull}27585[#27585]
+
+Plugin Lang Painless::
+* Painless: Add whitelist extensions {pull}28161[#28161]
+* Painless: Modify Loader to Load Classes Directly from Definition {pull}28088[#28088]
+* Clean Up Painless Cast Object {pull}27794[#27794]
+* Painless: Only allow Painless type names to be the same as the equivalent Java class. {pull}27264[#27264]
+
+Plugins::
+* Add client actions to action plugin {pull}28280[#28280] (issue: {issue}27759[#27759])
+* Plugins: Add validation to plugin descriptor parsing {pull}27951[#27951]
+* Plugins: Add plugin extension capabilities {pull}27881[#27881]
+* Add support for filtering mappings fields {pull}27603[#27603]
+
+Rank Evaluation::
+* Simplify RankEvalResponse output {pull}28266[#28266]
+
+Recovery::
+* Truncate tlog cli should assign global checkpoint {pull}28192[#28192] (issue: {issue}28181[#28181])
+* Replica starts peer recovery with safe commit {pull}28181[#28181] (issue: {issue}10708[#10708])
+* Primary send safe commit in file-based recovery {pull}28038[#28038] (issue: {issue}10708[#10708])
+* Fail resync-failed shards in subsequent writes {pull}28005[#28005]
+* Introduce promoting index shard state {pull}28004[#28004] (issue: {issue}24841[#24841])
+* Non-peer recovery should set the global checkpoint {pull}27965[#27965]
+* Persist global checkpoint when finalizing a peer recovery {pull}27947[#27947] (issue: {issue}27861[#27861])
+* Rollback a primary before recovering from translog {pull}27804[#27804] (issue: {issue}10708[#10708])
+
+Search::
+* Use typeName() to check field type in GeoShapeQueryBuilder {pull}27730[#27730]
+* Optimize search_after when sorting in index sort order {pull}26401[#26401]
+
+Sequence IDs::
+* Do not keep 5.x commits when having 6.x commits {pull}28188[#28188] (issues: {issue}27606[#27606], {issue}28038[#28038])
+* Use lastSyncedGlobalCheckpoint in deletion policy {pull}27826[#27826] (issue: {issue}27606[#27606])
+* Use CountedBitSet in LocalCheckpointTracker {pull}27793[#27793]
+* Only fsync global checkpoint if needed {pull}27652[#27652]
+* Keep commits and translog up to the global checkpoint {pull}27606[#27606]
+* Adjust CombinedDeletionPolicy for multiple commits {pull}27456[#27456] (issues: {issue}10708[#10708], {issue}27367[#27367])
+* Keeps index commits up to the current global checkpoint {pull}27367[#27367] (issue: {issue}10708[#10708])
+* Dedup translog operations by reading in reverse {pull}27268[#27268] (issue: {issue}10708[#10708])
+
+Settings::
+* Add validation of keystore setting names {pull}27626[#27626]
+
+Snapshot/Restore::
+* Use AmazonS3.doesObjectExist() method in S3BlobContainer {pull}27723[#27723]
+* Remove XContentType auto detection in BlobStoreRepository {pull}27480[#27480]
+* Include include_global_state in Snapshot status API (#22423) {pull}26853[#26853] (issue: {issue}22423[#22423])
+
+Task Manager::
+* Add ability to associate an ID with tasks {pull}27764[#27764] (issue: {issue}23250[#23250])
+
+Translog::
+* Simplify MultiSnapshot#SeqNoset {pull}27547[#27547] (issue: {issue}27268[#27268])
+* Enclose CombinedDeletionPolicy in SnapshotDeletionPolicy {pull}27528[#27528] (issues: {issue}27367[#27367], {issue}27456[#27456])
+
+[float]
+=== Bug Fixes
+
+Aggregations::
+* Adds metadata to rewritten aggregations {pull}28185[#28185] (issue: {issue}28170[#28170])
+* Fix NPE on composite aggregation with sub-aggregations that need scores {pull}28129[#28129]
+* StringTerms.Bucket.getKeyAsNumber detection type {pull}28118[#28118] (issue: {issue}28012[#28012])
+* Fix incorrect results for aggregations nested under a nested aggregation {pull}27946[#27946] (issue: {issue}27912[#27912])
+* Fix global aggregation that requires breadth first and scores {pull}27942[#27942] (issues: {issue}22321[#22321], {issue}27928[#27928])
+* Fix composite aggregation when after term is missing in the shard {pull}27936[#27936]
+* Fix preserving FiltersAggregationBuilder#keyed field on rewrite {pull}27900[#27900] (issue: {issue}27841[#27841])
+* Using DocValueFormat::parseBytesRef for parsing missing value parameter {pull}27855[#27855] (issue: {issue}27788[#27788])
+* Fix illegal cast of the "low cardinality" optimization of the `terms` aggregation. {pull}27543[#27543]
+* Always include the _index and _id for nested search hits. {pull}27201[#27201] (issue: {issue}27053[#27053])
+
+Allocation::
+* Do not open indices with broken settings {pull}26995[#26995]
+
+Core::
+* Fix lock accounting in releasable lock {pull}28202[#28202]
+* Fixes ByteSizeValue to serialise correctly {pull}27702[#27702] (issue: {issue}27568[#27568])
+* Do not set data paths on no local storage required {pull}27587[#27587] (issue: {issue}27572[#27572])
+* Ensure threadcontext is preserved when refresh listeners are invoked {pull}27565[#27565]
+* Ensure logging is configured for CLI commands {pull}27523[#27523] (issue: {issue}27521[#27521])
+
+Engine::
+* Replica recovery could go into an endless flushing loop {pull}28350[#28350]
+* Use `_refresh` to shrink the version map on inactivity {pull}27918[#27918] (issue: {issue}27852[#27852])
+* Allow resize version map under lock even if there are pending operations {pull}27870[#27870] (issue: {issue}27852[#27852])
+* Reset LiveVersionMap on sync commit {pull}27534[#27534] (issue: {issue}27516[#27516])
+
+Geo::
+* Correct two equality checks on incomparable types {pull}27688[#27688]
+* Handle case where the hole vertex is south of the containing polygon(s) {pull}27685[#27685] (issue: {issue}25933[#25933])
+
+Highlighting::
+* Fix highlighting on a keyword field that defines a normalizer {pull}27604[#27604]
+
+Inner Hits::
+* Add version support for inner hits in field collapsing (#27822) {pull}27833[#27833] (issue: {issue}27822[#27822])
+
+Internal::
+* Never return null from Strings.tokenizeToStringArray {pull}28224[#28224] (issue: {issue}28213[#28213])
+* Fallback to TransportMasterNodeAction for cluster health retries {pull}28195[#28195] (issue: {issue}28169[#28169])
+* Retain originalIndex info when rewriting FieldCapabilities requests {pull}27761[#27761]
+
+Java REST Client::
+* Do not use system properties when building the HttpAsyncClient {pull}27829[#27829] (issue: {issue}27827[#27827])
+
+Mapping::
+* Ignore null value for range field (#27845) {pull}28116[#28116] (issue: {issue}27845[#27845])
+* Pass `java.locale.providers=COMPAT` to Java 9 onwards {pull}28080[#28080] (issue: {issue}10984[#10984])
+* Allow update of `eager_global_ordinals` on `_parent`. {pull}28014[#28014] (issue: {issue}24407[#24407])
+* Fix merging of _meta field {pull}27352[#27352] (issue: {issue}27323[#27323])
+
+Network::
+* Only bind loopback addresses when binding to local {pull}28029[#28029] (issue: {issue}1877[#1877])
+* Remove potential nio selector leak {pull}27825[#27825]
+* Fix issue where the incorrect buffers are written {pull}27695[#27695] (issue: {issue}27551[#27551])
+* Throw UOE from compressible bytes stream reset {pull}27564[#27564] (issue: {issue}24927[#24927])
+* Bubble exceptions when closing compressible streams {pull}27542[#27542] (issue: {issue}27540[#27540])
+
+Packaging::
+* Allow custom service names when installing on windows {pull}25255[#25255] (issue: {issue}25231[#25231])
+
+Percolator::
+* Avoid TooManyClauses exception if number of terms / ranges is exactly equal to 1024 {pull}27519[#27519] (issue: {issue}1[#1])
+
+Plugin Analysis ICU::
+* Catch InvalidPathException in IcuCollationTokenFilterFactory {pull}27202[#27202]
+
+Plugin Analysis Phonetic::
+* Fix daitch_mokotoff phonetic filter to use the dedicated Lucene filter {pull}28225[#28225] (issue: {issue}28211[#28211])
+
+Plugin Lang Painless::
+* Painless: Fix variable scoping issue in lambdas {pull}27571[#27571] (issue: {issue}26760[#26760])
+* Painless: Fix errors allowing void to be assigned to def. {pull}27460[#27460] (issue: {issue}27210[#27210])
+
+Plugin Repository HDFS::
+* Fix SecurityException when HDFS Repository used against HA Namenodes {pull}27196[#27196]
+
+Plugins::
+* Make sure that we don't detect files as maven coordinate when installing a plugin {pull}28163[#28163]
+* Fix upgrading indices which use a custom similarity plugin. {pull}26985[#26985] (issue: {issue}25350[#25350])
+
+Recovery::
+* Open engine should keep only starting commit {pull}28228[#28228] (issues: {issue}27804[#27804], {issue}28181[#28181])
+* Allow shrinking of indices from a previous major {pull}28076[#28076] (issue: {issue}28061[#28061])
+* Set global checkpoint before open engine from store {pull}27972[#27972] (issues: {issue}27965[#27965], {issue}27970[#27970])
+* Check and repair index under the store metadata lock {pull}27768[#27768] (issues: {issue}24481[#24481], {issue}24787[#24787], {issue}27731[#27731])
+* Flush old indices on primary promotion and relocation {pull}27580[#27580] (issue: {issue}27536[#27536])
+
+Rollover::
+* Make index rollover action atomic {pull}28039[#28039] (issue: {issue}26976[#26976])
+
+Scripting::
+* Ensure we protect Collections obtained from scripts from self-referencing {pull}28335[#28335]
+
+Scroll::
+* Reject scroll query if size is 0 (#22552) {pull}27842[#27842] (issue: {issue}22552[#22552])
+* Fix scroll query with a sort that is a prefix of the index sort {pull}27498[#27498]
+
+Search::
+* Fix simple_query_string on invalid input {pull}28219[#28219] (issue: {issue}28204[#28204])
+* Use the underlying connection version for CCS connections {pull}28093[#28093]
+* Fix synonym phrase query expansion for cross_fields parsing {pull}28045[#28045]
+* Carry forward weights, etc on rescore rewrite {pull}27981[#27981] (issue: {issue}27979[#27979])
+* Fix routing with leading or trailing whitespace {pull}27712[#27712] (issue: {issue}27708[#27708])
+
+Sequence IDs::
+* Recovery from snapshot may leave seq# gaps {pull}27850[#27850]
+* No longer unidle shard during recovery {pull}27757[#27757] (issue: {issue}26591[#26591])
+* Obey translog durability in global checkpoint sync {pull}27641[#27641]
+
+Settings::
+* Settings: Introduce settings updater for a list of settings {pull}28338[#28338] (issue: {issue}28047[#28047])
+* Fix setting notification for complex setting (affixMap settings) that could cause transient settings to be ignored {pull}28317[#28317] (issue: {issue}28316[#28316])
+* Fix environment variable substitutions in list setting {pull}28106[#28106] (issue: {issue}27926[#27926])
+* Allow index settings to be reset by wildcards {pull}27671[#27671] (issue: {issue}27537[#27537])
+
+Snapshot/Restore::
+* Consistent updates of IndexShardSnapshotStatus {pull}28130[#28130] (issue: {issue}26480[#26480])
+* Avoid concurrent snapshot finalizations when deleting an INIT snapshot {pull}28078[#28078] (issues: {issue}27214[#27214], {issue}27931[#27931], {issue}27974[#27974])
+* Do not start snapshots that are deleted during initialization {pull}27931[#27931]
+* Do not swallow exception in ChecksumBlobStoreFormat.writeAtomic() {pull}27597[#27597]
+* Consistent update of stage and failure message in IndexShardSnapshotStatus {pull}27557[#27557] (issue: {issue}26480[#26480])
+* Fail restore when the shard allocations max retries count is reached {pull}27493[#27493] (issue: {issue}26865[#26865])
+* Delete shard store files before restoring a snapshot {pull}27476[#27476] (issues: {issue}20220[#20220], {issue}26865[#26865])
+
+Stats::
+* Fixes DocStats to properly deal with shards that report -1 index size {pull}27863[#27863]
+* Include internal refreshes in refresh stats {pull}27615[#27615]
+
+Term Vectors::
+* Fix term vectors generator with keyword and normalizer {pull}27608[#27608] (issue: {issue}27320[#27320])
+
+//[float]
+//=== Regressions
+
+//[float]
+//=== Known Issues
+
+[float]
+=== Upgrades
+
+Core::
+* Dependencies: Update joda time to 2.9.9 {pull}28261[#28261]
+* upgrade to lucene 7.2.1 {pull}28218[#28218] (issue: {issue}28044[#28044])
+* Upgrade jna from 4.4.0-1 to 4.5.1 {pull}28183[#28183] (issue: {issue}28172[#28172])
+
+Ingest::
+* update ingest-attachment to use Tika 1.17 and newer deps {pull}27824[#27824]
+
+[[release-notes-6.1.4]]
+== {es} version 6.1.4
+
+//[float]
+//[[breaking-6.1.4]]
+//=== Breaking Changes
+
+//[float]
+//=== Breaking Java Changes
+
+//[float]
+//=== Deprecations
+
+//[float]
+//=== New Features
+
+[float]
 === Enhancements
 
+Core::
+* Fix classes that can exit {pull}27518[#27518]
+
+[float]
 === Bug Fixes
 
-=== Regressions
+Aggregations::
+* StringTerms.Bucket.getKeyAsNumber detection type {pull}28118[#28118] (issue: {issue}28012[#28012])
+
+Core::
+* Remove special handling for _all in nodes info {pull}28971[#28971] (issue: {issue}28797[#28797])
+
+Engine::
+* Avoid class cast exception from index writer {pull}28989[#28989]
+* Maybe die before failing engine {pull}28973[#28973] (issues: {issue}27265[#27265], {issue}28967[#28967])
+
+Scripting::
+* Painless: Fix For Loop NullPointerException {pull}28506[#28506] (issue: {issue}28501[#28501])
+
+//[float]
+//=== Regressions
+
+//[float]
+//=== Known Issues
+
+[[release-notes-6.1.3]]
+== {es} version 6.1.3
+
+//[float]
+//[[breaking-6.1.3]]
+//=== Breaking Changes
+
+//[float]
+//=== Breaking Java Changes
+
+//[float]
+//=== Deprecations
+
+//[float]
+//=== New Features
+
+//[float]
+//=== Enhancements
+
+[float]
+=== Bug Fixes
+
+Engine::
+* Replica recovery could go into an endless flushing loop {pull}28350[#28350]
+
+Internal::
+* Never return null from Strings.tokenizeToStringArray {pull}28224[#28224] (issue: {issue}28213[#28213])
+* Fallback to TransportMasterNodeAction for cluster health retries {pull}28195[#28195] (issue: {issue}28169[#28169])
+
+Mapping::
+* Allow update of `eager_global_ordinals` on `_parent`. {pull}28014[#28014] (issue: {issue}24407[#24407])
+
+Scripting::
+* Ensure we protect Collections obtained from scripts from self-referencing {pull}28335[#28335]
+
+Settings::
+* Fix setting notification for complex setting (affixMap settings) that could cause transient settings to be ignored {pull}28317[#28317] (issue: {issue}28316[#28316])
+* Fix environment variable substitutions in list setting {pull}28106[#28106] (issue: {issue}27926[#27926])
+
+Snapshot/Restore::
+* Avoid concurrent snapshot finalizations when deleting an INIT snapshot {pull}28078[#28078] (issues: {issue}27214[#27214], {issue}27931[#27931], {issue}27974[#27974])
+* Do not start snapshots that are deleted during initialization {pull}27931[#27931]
+
+//[float]
+//=== Regressions
+
+//[float]
+//=== Known Issues
+
+[[release-notes-6.1.2]]
+== {es} version 6.1.2
+
+//[float]
+//[[breaking-6.1.2]]
+//=== Breaking Changes
+
+//[float]
+//=== Breaking Java Changes
+
+//[float]
+//=== Deprecations
+
+//[float]
+//=== New Features
+
+[float]
+=== Enhancements
+
+Internal::
+* Make AbstractQueryBuilder.declareStandardFields to be protected (#27865) {pull}27894[#27894] (issue: {issue}27865[#27865])
+
+[float]
+=== Bug Fixes
+
+Aggregations::
+* Fix incorrect results for aggregations nested under a nested aggregation {pull}27946[#27946] (issue: {issue}27912[#27912])
+* Fix composite aggregation when after term is missing in the shard {pull}27936[#27936]
+* Fix preserving FiltersAggregationBuilder#keyed field on rewrite {pull}27900[#27900] (issue: {issue}27841[#27841])
+
+Engine::
+* Use `_refresh` to shrink the version map on inactivity {pull}27918[#27918] (issue: {issue}27852[#27852])
+* Allow resize version map under lock even if there are pending operations {pull}27870[#27870] (issue: {issue}27852[#27852])
+
+Network::
+* Only bind loopback addresses when binding to local {pull}28029[#28029]
+
+Recovery::
+* Allow shrinking of indices from a previous major {pull}28076[#28076] (issue: {issue}28061[#28061])
+
+Search::
+* Use the underlying connection version for CCS connections {pull}28093[#28093]
+* Carry forward weights, etc on rescore rewrite {pull}27981[#27981] (issue: {issue}27979[#27979])
+
+Snapshot/Restore::
+* Fail restore when the shard allocations max retries count is reached {pull}27493[#27493] (issue: {issue}26865[#26865])
+
+Translog::
+* Only sync translog when global checkpoint increased {pull}27973[#27973] (issues: {issue}27837[#27837], {issue}27970[#27970])
+
+//[float]
+//=== Regressions
+
+//[float]
+//=== Known Issues
+
+[[release-notes-6.1.1]]
+== {es} version 6.1.1
+
+//[float]
+//[[breaking-6.1.1]]
+//=== Breaking Changes
+
+//[float]
+//=== Breaking Java Changes
+
+//[float]
+//=== Deprecations
+
+//[float]
+//=== New Features
+
+[float]
+=== Enhancements
+
+Snapshot/Restore::
+* Use AmazonS3.doesObjectExist() method in S3BlobContainer {pull}27723[#27723]
+
+[float]
+=== Bug Fixes
+
+Inner Hits::
+* Add version support for inner hits in field collapsing (#27822) {pull}27833[#27833] (issue: {issue}27822[#27822])
+
+Java REST Client::
+* Do not use system properties when building the HttpAsyncClient {pull}27829[#27829] (issue: {issue}27827[#27827])
+
+Search::
+* Fix routing with leading or trailing whitespace {pull}27712[#27712] (issue: {issue}27708[#27708])
+
+Sequence IDs::
+* Recovery from snapshot may leave seq# gaps {pull}27850[#27850]
+* No longer unidle shard during recovery {pull}27757[#27757] (issue: {issue}26591[#26591])
+
+//[float]
Regressions + +//[float] +//=== Known Issues + +[float] +=== Upgrades + +Ingest:: +* update ingest-attachment to use Tika 1.17 and newer deps {pull}27824[#27824] + +[[release-notes-6.1.0]] +== {es} version 6.1.0 + +[float] +[[breaking-6.1.0]] +=== Breaking Changes + +Network:: +* Allow only a fixed-size receive predictor {pull}26165[#26165] (issue: {issue}23185[#23185]) + +REST:: +* Standardize underscore requirements in parameters {pull}27414[#27414] (issues: {issue}26886[#26886], {issue}27040[#27040]) + +Scroll:: +* Fail queries with scroll that explicitely set request_cache {pull}27342[#27342] + +Search:: +* Add a limit to from + size in top_hits and inner hits. {pull}26492[#26492] (issue: {issue}11511[#11511]) + +[float] +=== Breaking Java Changes + +Aggregations:: +* Moves deferring code into its own subclass {pull}26421[#26421] + +Core:: +* Unify Settings xcontent reading and writing {pull}26739[#26739] + +Settings:: +* Return List instead of an array from settings {pull}26903[#26903] +* Remove `Settings,put(Map)` {pull}26785[#26785] + +[float] +=== Deprecations + +Aggregations:: +* Deprecate global_ordinals_hash and global_ordinals_low_cardinality {pull}26173[#26173] (issue: {issue}26014[#26014]) + +Allocation:: +* Add deprecation warning for negative index.unassigned.node_left.delayed_timeout {pull}26832[#26832] (issue: {issue}26828[#26828]) + +Analysis:: +* Add limits for ngram and shingle settings {pull}27411[#27411] (issues: {issue}25887[#25887], {issue}27211[#27211]) + +Geo:: +* [GEO] 6x Deprecate ShapeBuilders and decouple geojson parse logic {pull}27345[#27345] + +Mapping:: +* Deprecate the `index_options` parameter for numeric fields {pull}26672[#26672] (issue: {issue}21475[#21475]) + +Plugin Repository Azure:: +* Azure repository: Move to named configurations as we do for S3 repository and secure settings {pull}23405[#23405] (issues: {issue}22762[#22762], {issue}22763[#22763]) + +Search:: +* doc: deprecate _primary and _replica shard option {pull}26792[#26792] (issue: {issue}26335[#26335]) + +[float] +=== New Features + +Aggregations:: +* Aggregations: bucket_sort pipeline aggregation {pull}27152[#27152] (issue: {issue}14928[#14928]) +* Add composite aggregator {pull}26800[#26800] + +Analysis:: +* Added Bengali Analyzer to Elasticsearch with respect to the lucene update {pull}26527[#26527] + +Ingest:: +* add URL-Decode Processor to Ingest {pull}26045[#26045] (issue: {issue}25837[#25837]) + +Java High Level REST Client:: +* Added Delete Index support to high-level REST client {pull}27019[#27019] (issue: {issue}25847[#25847]) + +Nested Docs:: +* Multi-level Nested Sort with Filters {pull}26395[#26395] + +Query DSL:: +* Add terms_set query {pull}27145[#27145] (issue: {issue}26915[#26915]) +* Introduce sorted_after query for sorted index {pull}26377[#26377] +* Add support for auto_generate_synonyms_phrase_query in match_query, multi_match_query, query_string and simple_query_string {pull}26097[#26097] + +Search:: +* Expose `fuzzy_transpositions` parameter in fuzzy queries {pull}26870[#26870] (issue: {issue}18348[#18348]) +* Add upper limit for scroll expiry {pull}26448[#26448] (issues: {issue}11511[#11511], {issue}23268[#23268]) +* Implement adaptive replica selection {pull}26128[#26128] (issue: {issue}24915[#24915]) +* configure distance limit {pull}25731[#25731] (issue: {issue}25528[#25528]) + +Similarities:: +* Add a scripted similarity. 
{pull}25831[#25831] + +Suggesters:: +* Expose duplicate removal in the completion suggester {pull}26496[#26496] (issue: {issue}23364[#23364]) +* Support must and should for context query in context suggester {pull}26407[#26407] (issues: {issue}24421[#24421], {issue}24565[#24565]) + +[float] +=== Enhancements + +Aggregations:: +* Allow aggregation sorting via nested aggregation {pull}26683[#26683] (issue: {issue}16838[#16838]) + +Allocation:: +* Tie-break shard path decision based on total number of shards on path {pull}27039[#27039] (issue: {issue}26654[#26654]) +* Balance shards for an index more evenly across multiple data paths {pull}26654[#26654] (issue: {issue}16763[#16763]) +* Expand "NO" decision message in NodeVersionAllocationDecider {pull}26542[#26542] (issue: {issue}10403[#10403]) +* _reroute's retry_failed flag should reset failure counter {pull}25888[#25888] (issue: {issue}25291[#25291]) + +Analysis:: +* Add configurable `max_token_length` parameter to whitespace tokenizer {pull}26749[#26749] (issue: {issue}26643[#26643]) + +CRUD:: +* Add wait_for_active_shards parameter to index open command {pull}26682[#26682] (issue: {issue}20937[#20937]) + +Core:: +* Fix classes that can exit {pull}27518[#27518] +* Replace empty index block checks with global block checks in template delete/put actions {pull}27050[#27050] (issue: {issue}10530[#10530]) +* Allow Uid#decodeId to decode from a byte array slice {pull}26987[#26987] (issue: {issue}26931[#26931]) +* Use separate searchers for "search visibility" vs "move indexing buffer to disk {pull}26972[#26972] (issues: {issue}15768[#15768], {issue}26802[#26802], {issue}26912[#26912], {issue}3593[#3593]) +* Add ability to split shards {pull}26931[#26931] +* Make circuit breaker mutations debuggable {pull}26067[#26067] (issue: {issue}25891[#25891]) + +Dates:: +* DateProcessor Locale {pull}26186[#26186] (issue: {issue}25513[#25513]) + +Discovery:: +* Stop responding to ping requests before master abdication {pull}27329[#27329] (issue: {issue}27328[#27328]) + +Engine:: +* Ensure external refreshes will also refresh internal searcher to minimize segment creation {pull}27253[#27253] (issue: {issue}26972[#26972]) +* Move IndexShard#getWritingBytes() under InternalEngine {pull}27209[#27209] (issue: {issue}26972[#26972]) +* Refactor internal engine {pull}27082[#27082] + +Geo:: +* Add ignore_malformed to geo_shape fields {pull}24654[#24654] (issue: {issue}23747[#23747]) + +Ingest:: +* add json-processor support for non-map json types {pull}27335[#27335] (issue: {issue}25972[#25972]) +* Introduce templating support to timezone/locale in DateProcessor {pull}27089[#27089] (issue: {issue}24024[#24024]) +* Add support for parsing inline script (#23824) {pull}26846[#26846] (issue: {issue}23824[#23824]) +* Consolidate locale parsing. 
{pull}26400[#26400] +* Accept ingest simulate params as ints or strings {pull}23885[#23885] (issue: {issue}23823[#23823]) + +Internal:: +* Avoid uid creation in ParsedDocument {pull}27241[#27241] +* Upgrade to Lucene 7.1.0 snapshot version {pull}26864[#26864] (issue: {issue}26527[#26527]) +* Remove `_index` fielddata hack if cluster alias is present {pull}26082[#26082] (issue: {issue}25885[#25885]) + +Java High Level REST Client:: +* Adjust RestHighLevelClient method modifiers {pull}27238[#27238] +* Decouple BulkProcessor from ThreadPool {pull}26727[#26727] (issue: {issue}26028[#26028]) + +Logging:: +* Add more information on _failed_to_convert_ exception (#21946) {pull}27034[#27034] (issue: {issue}21946[#21946]) +* Improve shard-failed log messages. {pull}26866[#26866] + +Mapping:: +* Allow ip_range to accept CIDR notation {pull}27192[#27192] (issue: {issue}26260[#26260]) +* Deduplicate `_field_names`. {pull}26550[#26550] +* Throw a better error message for empty field names {pull}26543[#26543] (issue: {issue}23348[#23348]) +* Stricter validation for min/max values for whole numbers {pull}26137[#26137] +* Make FieldMapper.copyTo() always non-null. {pull}25994[#25994] + +Nested Docs:: +* Use the primary_term field to identify parent documents {pull}27469[#27469] (issue: {issue}24362[#24362]) +* Prohibit using `nested_filter`, `nested_path` and new `nested` Option at the same time in FieldSortBuilder {pull}26490[#26490] (issue: {issue}17286[#17286]) + +Network:: +* Remove manual tracking of registered channels {pull}27445[#27445] (issue: {issue}27260[#27260]) +* Remove tcp profile from low level nio channel {pull}27441[#27441] (issue: {issue}27260[#27260]) +* Decouple `ChannelFactory` from Tcp classes {pull}27286[#27286] (issue: {issue}27260[#27260]) + +Percolator:: +* Use Lucene's CoveringQuery to select percolate candidate matches {pull}27271[#27271] (issues: {issue}26081[#26081], {issue}26307[#26307]) +* Add support to percolate query to percolate multiple documents simultaneously {pull}26418[#26418] +* Hint what clauses are important in a conjunction query based on fields {pull}26081[#26081] +* Add support for selecting percolator query candidate matches containing range queries {pull}25647[#25647] (issue: {issue}21040[#21040]) + +Plugin Discovery EC2:: +* update AWS SDK for ECS Task IAM support in discovery-ec2 {pull}26479[#26479] (issue: {issue}23039[#23039]) + +Plugin Lang Painless:: +* Painless: Only allow Painless type names to be the same as the equivalent Java class. 
{pull}27264[#27264] +* Allow for the Painless Definition to have multiple instances for white-listing {pull}27096[#27096] +* Separate Painless Whitelist Loading from the Painless Definition {pull}26540[#26540] +* Remove Sort enum from Painless Definition {pull}26179[#26179] + +Plugin Repository Azure:: +* Add azure storage endpoint suffix #26432 {pull}26568[#26568] (issue: {issue}26432[#26432]) +* Support for accessing Azure repositories through a proxy {pull}23518[#23518] (issues: {issue}23506[#23506], {issue}23517[#23517]) + +Plugin Repository S3:: +* Remove S3 output stream {pull}27280[#27280] (issue: {issue}27278[#27278]) +* Update to AWS SDK 1.11.223 {pull}27278[#27278] + +Plugins:: +* Plugins: Add versionless alias to all security policy codebase properties {pull}26756[#26756] (issue: {issue}26521[#26521]) +* Allow plugins to plug rescore implementations {pull}26368[#26368] (issue: {issue}26208[#26208]) + +Query DSL:: +* Add support for wildcard on `_index` {pull}27334[#27334] (issue: {issue}25722[#25722]) + +Reindex API:: +* Update by Query is modified to accept short `script` parameter. {pull}26841[#26841] (issue: {issue}24898[#24898]) +* reindex: automatically choose the number of slices {pull}26030[#26030] (issues: {issue}24547[#24547], {issue}25582[#25582]) + +Rollover:: +* Add size-based condition to the index rollover API {pull}27160[#27160] (issue: {issue}27004[#27004]) +* Add size-based condition to the index rollover API {pull}27115[#27115] (issue: {issue}27004[#27004]) + +Scripting:: +* Script: Convert script query to a dedicated script context {pull}26003[#26003] + +Search:: +* Make fields optional in multi_match query and rely on index.query.default_field by default {pull}27380[#27380] +* fix unnecessary logger creation {pull}27349[#27349] +* `ObjectParser` : replace `IllegalStateException` with `ParsingException` {pull}27302[#27302] (issue: {issue}27147[#27147]) +* Uses norms for exists query if enabled {pull}27237[#27237] +* Cross Cluster Search: make remote clusters optional {pull}27182[#27182] (issues: {issue}26118[#26118], {issue}27161[#27161]) +* Enhances exists queries to reduce need for `_field_names` {pull}26930[#26930] (issue: {issue}26770[#26770]) +* Change ParentFieldSubFetchPhase to create doc values iterator once per segment {pull}26815[#26815] +* Change VersionFetchSubPhase to create doc values iterator once per segment {pull}26809[#26809] +* Change ScriptFieldsFetchSubPhase to create search scripts once per segment {pull}26808[#26808] (issue: {issue}26775[#26775]) +* Make sure SortBuilders rewrite inner nested sorts {pull}26532[#26532] +* Extend testing of build method in ScriptSortBuilder {pull}26520[#26520] (issues: {issue}17286[#17286], {issue}26490[#26490]) +* Accept an array of field names and boosts in the index.query.default_field setting {pull}26320[#26320] (issue: {issue}25946[#25946]) +* Reject IPv6-mapped IPv4 addresses when using the CIDR notation. 
{pull}26254[#26254] (issue: {issue}26078[#26078]) +* Rewrite range queries with open bounds to exists query {pull}26160[#26160] (issue: {issue}22640[#22640]) + +Sequence IDs:: +* Only fsync global checkpoint if needed {pull}27652[#27652] +* Log primary-replica resync failures {pull}27421[#27421] (issues: {issue}24841[#24841], {issue}27418[#27418]) +* Lazy initialize checkpoint tracker bit sets {pull}27179[#27179] (issue: {issue}10708[#10708]) +* Returns the current primary_term for Get/MultiGet requests {pull}27177[#27177] (issue: {issue}26493[#26493]) + +Settings:: +* Allow affix settings to specify dependencies {pull}27161[#27161] +* Represent lists as actual lists inside Settings {pull}26878[#26878] (issue: {issue}26723[#26723]) +* Remove Settings#getAsMap() {pull}26845[#26845] +* Replace group map settings with affix setting {pull}26819[#26819] +* Throw exception if setting isn't recognized {pull}26569[#26569] (issue: {issue}25607[#25607]) +* Settings: Move keystore creation to plugin installation {pull}26329[#26329] (issue: {issue}26309[#26309]) + +Snapshot/Restore:: +* Remove XContentType auto detection in BlobStoreRepository {pull}27480[#27480] +* Snapshot: Migrate TransportRequestHandler to TransportMasterNodeAction {pull}27165[#27165] (issue: {issue}27151[#27151]) +* Fix toString of class SnapshotStatus (#26851) {pull}26852[#26852] (issue: {issue}26851[#26851]) + +Stats:: +* Adds average document size to DocsStats {pull}27117[#27117] (issue: {issue}27004[#27004]) +* Stats to record how often the ClusterState diff mechanism is used successfully {pull}27107[#27107] (issue: {issue}26973[#26973]) +* Expose adaptive replica selection stats in /_nodes/stats API {pull}27090[#27090] +* Add cgroup memory usage/limit to OS stats on Linux {pull}26166[#26166] +* Add segment attributes to the `_segments` API. {pull}26157[#26157] (issue: {issue}26130[#26130]) + +Suggesters:: +* Improve error message for parse failures of completion fields {pull}27297[#27297] +* Support 'AND' operation for context query in context suggester {pull}24565[#24565] (issue: {issue}24421[#24421]) + +[float] +=== Bug Fixes + +Aggregations:: +* Disable the "low cardinality" optimization of terms aggregations. {pull}27545[#27545] (issue: {issue}27543[#27543]) +* scripted_metric _agg parameter disappears if params are provided {pull}27159[#27159] (issues: {issue}19768[#19768], {issue}19863[#19863]) + +Cluster:: +* Properly format IndexGraveyard deletion date as date {pull}27362[#27362] +* Remove optimisations to reuse objects when applying a new `ClusterState` {pull}27317[#27317] + +Core:: +* Do not set data paths on no local storage required {pull}27587[#27587] (issue: {issue}27572[#27572]) +* Ensure threadcontext is preserved when refresh listeners are invoked {pull}27565[#27565] +* Ensure logging is configured for CLI commands {pull}27523[#27523] (issue: {issue}27521[#27521]) +* Protect shard splitting from illegal target shards {pull}27468[#27468] (issue: {issue}26931[#26931]) +* Avoid NPE when getting build information {pull}27442[#27442] +* Fix `ShardSplittingQuery` to respect nested documents. 
{pull}27398[#27398] (issue: {issue}27378[#27378]) +* When building Settings do not set SecureSettings if empty {pull}26988[#26988] (issue: {issue}316[#316]) + +Engine:: +* Reset LiveVersionMap on sync commit {pull}27534[#27534] (issue: {issue}27516[#27516]) +* Carry over version map size to prevent excessive resizing {pull}27516[#27516] (issue: {issue}20498[#20498]) + +Geo:: +* Correct two equality checks on incomparable types {pull}27688[#27688] +* [GEO] fix pointsOnly bug for MULTIPOINT {pull}27415[#27415] + +Index Templates:: +* Prevent constructing an index template without index patterns {pull}27662[#27662] + +Ingest:: +* Add pipeline support for REST API bulk upsert {pull}27075[#27075] (issue: {issue}25601[#25601]) +* Fixing Grok pattern for Apache 2.4 {pull}26635[#26635] + +Inner Hits:: +* Return an empty _source for nested inner hit when filtering on a field that doesn't exist {pull}27531[#27531] + +Internal:: +* When checking if key exists in ThreadContextStruct:putHeaders() method, should put requestHeaders in map first {pull}26068[#26068] +* Adding a refresh listener to a recovering shard should be a noop {pull}26055[#26055] + +Java High Level REST Client:: +* Register ip_range aggregation with the high level client {pull}26383[#26383] +* add top hits as a parsed aggregation to the rest high level client {pull}26370[#26370] + +Mapping:: +* Fix dynamic mapping update generation. {pull}27467[#27467] +* Fix merging of _meta field {pull}27352[#27352] (issue: {issue}27323[#27323]) +* Fixed rounding of bounds in scaled float comparison {pull}27207[#27207] (issue: {issue}27189[#27189]) + +Nested Docs:: +* Ensure nested documents have consistent version and seq_ids {pull}27455[#27455] +* Prevent duplicate fields when mixing parent and root nested includes {pull}27072[#27072] (issue: {issue}26990[#26990]) + +Network:: +* Throw UOE from compressible bytes stream reset {pull}27564[#27564] (issue: {issue}24927[#24927]) +* Bubble exceptions when closing compressible streams {pull}27542[#27542] (issue: {issue}27540[#27540]) +* Do not set SO_LINGER on server channels {pull}26997[#26997] +* Do not set SO_LINGER to 0 when not shutting down {pull}26871[#26871] (issue: {issue}26764[#26764]) +* Close TcpTransport on RST in some Spots to Prevent Leaking TIME_WAIT Sockets {pull}26764[#26764] (issue: {issue}26701[#26701]) + +Packaging:: +* Removes minimum master nodes default number {pull}26803[#26803] +* setgid on /etc/elasticsearch on package install {pull}26412[#26412] (issue: {issue}26410[#26410]) + +Percolator:: +* Avoid TooManyClauses exception if number of terms / ranges is exactly equal to 1024 {pull}27519[#27519] + +Plugin Analysis ICU:: +* Catch InvalidPathException in IcuCollationTokenFilterFactory {pull}27202[#27202] + +Plugin Lang Painless:: +* Painless: Fix variable scoping issue in lambdas {pull}27571[#27571] (issue: {issue}26760[#26760]) +* Painless: Fix errors allowing void to be assigned to def. 
{pull}27460[#27460] (issue: {issue}27210[#27210]) + +Plugin Repository GCS:: +* Create new handlers for every new request in GoogleCloudStorageService {pull}27339[#27339] (issue: {issue}27092[#27092]) + +Recovery:: +* Flush old indices on primary promotion and relocation {pull}27580[#27580] (issue: {issue}27536[#27536]) + +Reindex API:: +* Reindex: Fix headers in reindex action {pull}26937[#26937] (issue: {issue}22976[#22976]) + +Scroll:: +* Fix scroll query with a sort that is a prefix of the index sort {pull}27498[#27498] + +Search:: +* Fix profiling naming issues {pull}27133[#27133] +* Fix max score tracking with field collapsing {pull}27122[#27122] (issue: {issue}23840[#23840]) +* Apply missing request options to the expand phase {pull}27118[#27118] (issues: {issue}26649[#26649], {issue}27079[#27079]) +* Calculate and cache result when advanceExact is called {pull}26920[#26920] (issue: {issue}26817[#26817]) +* Filter unsupported relation for RangeQueryBuilder {pull}26620[#26620] (issue: {issue}26575[#26575]) +* Handle leniency for phrase query on a field indexed without positions {pull}26388[#26388] + +Sequence IDs:: +* Obey translog durability in global checkpoint sync {pull}27641[#27641] +* Fix resync request serialization {pull}27418[#27418] (issue: {issue}24841[#24841]) + +Settings:: +* Allow index settings to be reset by wildcards {pull}27671[#27671] (issue: {issue}27537[#27537]) + +Snapshot/Restore:: +* Do not swallow exception in ChecksumBlobStoreFormat.writeAtomic() {pull}27597[#27597] +* Delete shard store files before restoring a snapshot {pull}27476[#27476] (issues: {issue}20220[#20220], {issue}26865[#26865]) +* Fix snapshot getting stuck in INIT state {pull}27214[#27214] (issue: {issue}27180[#27180]) +* Fix default value of ignore_unavailable for snapshot REST API (#25359) {pull}27056[#27056] (issue: {issue}25359[#25359]) +* Do not create directory on readonly repository (#21495) {pull}26909[#26909] (issue: {issue}21495[#21495]) + +Stats:: +* Include internal refreshes in refresh stats {pull}27615[#27615] +* Make Segment statistics aware of segments held by internal readers {pull}27558[#27558] +* Ensure `doc_stats` are changing even if refresh is disabled {pull}27505[#27505] + +//[float] +//=== Regressions + +//[float] +//=== Known Issues + +[float] +=== Upgrades + +Core:: +* Upgrade to Jackson 2.8.10 {pull}27230[#27230] +* Upgrade to Lucene 7.1 {pull}27225[#27225] + +Plugin Discovery EC2:: +* Upgrade AWS SDK Jackson Databind to 2.6.7.1 {pull}27361[#27361] (issues: {issue}27278[#27278], {issue}27359[#27359]) + +Plugin Discovery GCE:: +* Update Google SDK to version 1.23.0 {pull}27381[#27381] (issue: {issue}26636[#26636]) + +Plugin Lang Painless:: +* Upgrade Painless from ANTLR 4.5.1-1 to ANTLR 4.5.3. 
{pull}27153[#27153] + +[[release-notes-6.0.1]] +== {es} version 6.0.1 + +[float] +[[breaking-6.0.1]] +=== Breaking Changes + +Scroll:: +* Fail queries with scroll that explicitly set request_cache {pull}27342[#27342] + +//[float] +//=== Breaking Java Changes + +//[float] +//=== Deprecations + +//[float] +//=== New Features + +[float] +=== Enhancements + +Core:: +* Fix classes that can exit {pull}27518[#27518] + +Discovery:: +* Stop responding to ping requests before master abdication {pull}27329[#27329] (issue: {issue}27328[#27328]) + +Plugin Repository S3:: +* Remove S3 output stream {pull}27280[#27280] (issue: {issue}27278[#27278]) +* Update to AWS SDK 1.11.223 {pull}27278[#27278] + +Search:: +* fix unnecessary logger creation {pull}27349[#27349] + +Sequence IDs:: +* Log primary-replica resync failures {pull}27421[#27421] (issues: {issue}24841[#24841], {issue}27418[#27418]) + +Snapshot/Restore:: +* Remove XContentType auto detection in BlobStoreRepository {pull}27480[#27480] + +[float] +=== Bug Fixes + +Cluster:: +* Properly format IndexGraveyard deletion date as date {pull}27362[#27362] + +Core:: +* Do not set data paths on no local storage required {pull}27587[#27587] (issue: {issue}27572[#27572]) +* Ensure threadcontext is preserved when refresh listeners are invoked {pull}27565[#27565] +* Avoid NPE when getting build information {pull}27442[#27442] +* When building Settings do not set SecureSettings if empty {pull}26988[#26988] (issue: {issue}316[#316]) + +Engine:: +* Reset LiveVersionMap on sync commit {pull}27534[#27534] (issue: {issue}27516[#27516]) +* Carry over version map size to prevent excessive resizing {pull}27516[#27516] (issue: {issue}20498[#20498]) + +Inner Hits:: +* Return an empty _source for nested inner hit when filtering on a field that doesn't exist {pull}27531[#27531] + +Mapping:: +* Fix dynamic mapping update generation. {pull}27467[#27467] +* Fixed rounding of bounds in scaled float comparison {pull}27207[#27207] (issue: {issue}27189[#27189]) + +Nested Docs:: +* Ensure nested documents have consistent version and seq_ids {pull}27455[#27455] + +Network:: +* Throw UOE from compressible bytes stream reset {pull}27564[#27564] (issue: {issue}24927[#24927]) +* Bubble exceptions when closing compressible streams {pull}27542[#27542] (issue: {issue}27540[#27540]) + +Plugin Lang Painless:: +* Painless: Fix errors allowing void to be assigned to def. 
{pull}27460[#27460] (issue: {issue}27210[#27210]) + +Plugin Repository GCS:: +* Create new handlers for every new request in GoogleCloudStorageService {pull}27339[#27339] (issue: {issue}27092[#27092]) + +Recovery:: +* Flush old indices on primary promotion and relocation {pull}27580[#27580] (issue: {issue}27536[#27536]) + +Reindex API:: +* Reindex: Fix headers in reindex action {pull}26937[#26937] (issue: {issue}22976[#22976]) + +Search:: +* Fix profiling naming issues {pull}27133[#27133] + +Sequence IDs:: +* Fix resync request serialization {pull}27418[#27418] (issue: {issue}24841[#24841]) + +Snapshot/Restore:: +* Do not swallow exception in ChecksumBlobStoreFormat.writeAtomic() {pull}27597[#27597] +* Delete shard store files before restoring a snapshot {pull}27476[#27476] (issues: {issue}20220[#20220], {issue}26865[#26865]) +* Fix snapshot getting stuck in INIT state {pull}27214[#27214] (issue: {issue}27180[#27180]) +* Fix default value of ignore_unavailable for snapshot REST API (#25359) {pull}27056[#27056] (issue: {issue}25359[#25359]) +* Do not create directory on readonly repository (#21495) {pull}26909[#26909] (issue: {issue}21495[#21495]) + +//[float] +//=== Regressions + +//[float] +//=== Known Issues + +[float] +=== Upgrades + +Plugin Discovery EC2:: +* Upgrade AWS SDK Jackson Databind to 2.6.7.1 {pull}27361[#27361] (issues: {issue}27278[#27278], {issue}27359[#27359]) + +Plugin Discovery GCE:: +* Update Google SDK to version 1.23.0 {pull}27381[#27381] (issue: {issue}26636[#26636]) + +[[release-notes-6.0.0]] +== {es} version 6.0.0 + +[float] +[[breaking-6.0.0]] +=== Breaking Changes + +Aggregations:: +* Change parsing of numeric `to` and `from` parameters in `date_range` aggregation {pull}25376[#25376] (issue: {issue}17920[#17920]) + +Aliases:: +* Wrong behavior deleting alias {pull}23997[#23997] (issues: {issue}10106[#10106], {issue}23960[#23960]) + +Allocation:: +* Remove `cluster.routing.allocation.snapshot.relocation_enabled` setting {pull}20994[#20994] + +Analysis:: +* Do not allow custom analyzers to have the same names as built-in analyzers {pull}22349[#22349] (issue: {issue}22263[#22263]) +* Removing query-string parameters in `_analyze` API {pull}20704[#20704] (issue: {issue}20246[#20246]) + +CAT API:: +* Write -1 on unbounded queue in cat thread pool {pull}21342[#21342] (issue: {issue}21187[#21187]) + +CRUD:: +* Disallow `VersionType.FORCE` for GetRequest {pull}21079[#21079] (issue: {issue}20995[#20995]) +* Disallow `VersionType.FORCE` versioning for 6.x indices {pull}20995[#20995] (issue: {issue}20377[#20377]) +* If the index does not exist, delete document will not auto create it {pull}24518[#24518] (issue: {issue}15425[#15425]) + +Cluster:: +* Disallow : in cluster and index/alias names {pull}26247[#26247] (issue: {issue}23892[#23892]) +* No longer allow cluster name in data path {pull}20433[#20433] (issue: {issue}20391[#20391]) + +Core:: +* Simplify file store {pull}24402[#24402] (issue: {issue}24390[#24390]) +* Make boolean conversion strict {pull}22200[#22200] +* Remove the `default` store type. {pull}21616[#21616] +* Remove store throttling. 
{pull}21573[#21573] + +Geo:: +* Remove deprecated geo search features {pull}22876[#22876] +* Reduce GeoDistance Insanity {pull}19846[#19846] + +Highlighting:: +* Remove the postings highlighter and make unified the default highlighter choice {pull}25028[#25028] + +Index APIs:: +* Remove (deprecated) support for '+' in index expressions {pull}25274[#25274] (issue: {issue}24515[#24515]) +* Delete index API to work only against concrete indices {pull}25268[#25268] (issues: {issue}2318[#2318], {issue}23997[#23997]) +* Open/Close index api to allow_no_indices by default {pull}24401[#24401] (issues: {issue}24031[#24031], {issue}24341[#24341]) +* Remove support for controversial `ignore_unavailable` and `allow_no_indices` from indices exists api {pull}20712[#20712] + +Index Templates:: +* Allows multiple patterns to be specified for index templates {pull}21009[#21009] (issue: {issue}20690[#20690]) + +Indexed Scripts/Templates:: +* Scripting: Remove search template actions {pull}25717[#25717] + +Ingest:: +* update ingest-user-agent regexes.yml {pull}25608[#25608] +* remove ingest.new_date_format {pull}25583[#25583] + +Inner Hits:: +* Return the _source of inner hit nested as is without wrapping it into its full path context {pull}26982[#26982] (issues: {issue}26102[#26102], {issue}26944[#26944]) + +Java API:: +* Enforce Content-Type requirement on the rest layer and remove deprecated methods {pull}23146[#23146] (issue: {issue}19388[#19388]) + +Java REST Client:: +* Remove deprecated created and found from index, delete and bulk {pull}25516[#25516] (issues: {issue}19566[#19566], {issue}19630[#19630], {issue}19633[#19633]) + +Mapping:: +* Reject out of range numbers for float, double and half_float {pull}25826[#25826] (issue: {issue}25534[#25534]) +* Enforce at most one type. 
{pull}24428[#24428] (issue: {issue}24317[#24317]) +* Disallow `include_in_all` for 6.0+ indices {pull}22970[#22970] (issue: {issue}22923[#22923]) +* Disable _all by default, disallow configuring _all on 6.0+ indices {pull}22144[#22144] (issues: {issue}19784[#19784], {issue}20925[#20925], {issue}21341[#21341]) +* Throw an exception on unrecognized "match_mapping_type" {pull}22090[#22090] (issue: {issue}17285[#17285]) + +Network:: +* Remove unused Netty-related settings {pull}26161[#26161] +* Remove blocking TCP clients and servers {pull}22639[#22639] +* Remove `modules/transport_netty_3` in favor of `netty_4` {pull}21590[#21590] +* Remove LocalTransport in favor of MockTcpTransport {pull}20695[#20695] + +Packaging:: +* Configure heap dump path out of the box {pull}26755[#26755] (issue: {issue}26665[#26665]) +* Remove support for ES_INCLUDE {pull}25804[#25804] +* Setup: Change default heap to 1G {pull}25695[#25695] +* Use config directory to find jvm.options {pull}25679[#25679] (issue: {issue}23004[#23004]) +* Remove implicit 32-bit support {pull}25435[#25435] +* Remove default path settings {pull}25408[#25408] (issue: {issue}25357[#25357]) +* Remove path.conf setting {pull}25392[#25392] (issue: {issue}25357[#25357]) +* Honor masking of systemd-sysctl.service {pull}24234[#24234] (issues: {issue}21899[#21899], {issue}806[#806]) +* Rename CONF_DIR to ES_PATH_CONF {pull}26197[#26197] (issue: {issue}26154[#26154]) +* Remove customization of ES_USER and ES_GROUP {pull}23989[#23989] (issue: {issue}23848[#23848]) + +Percolator:: +* Remove deprecated percolate and mpercolate apis {pull}22331[#22331] + +Plugin Analysis ICU:: +* Upgrade icu4j for the ICU analysis plugin to 59.1 {pull}25243[#25243] (issue: {issue}21425[#21425]) +* Upgrade icu4j to latest version {pull}24821[#24821] + +Plugin Delete By Query:: +* Require explicit query in _delete_by_query API {pull}23632[#23632] (issue: {issue}23629[#23629]) + +Plugin Discovery Azure Classic:: +* Remove `discovery.type` BWC layer from the EC2/Azure/GCE plugins {pull}25080[#25080] (issue: {issue}24543[#24543]) + +Plugin Discovery EC2:: +* Ec2 Discovery: Cleanup deprecated settings {pull}24150[#24150] +* Discovery EC2: Remove region setting {pull}23991[#23991] (issue: {issue}22758[#22758]) +* AWS Plugins: Remove signer type setting {pull}23984[#23984] (issue: {issue}22599[#22599]) + +Plugin Lang JS:: +* Remove lang-python and lang-javascript {pull}20734[#20734] (issue: {issue}20698[#20698]) + +Plugin Mapper Attachment:: +* Remove mapper attachments plugin {pull}20416[#20416] (issue: {issue}18837[#18837]) + +Plugin Repository Azure:: +* Remove global `repositories.azure` settings {pull}23262[#23262] (issues: {issue}22800[#22800], {issue}22856[#22856]) +* Remove auto creation of container for azure repository {pull}22858[#22858] (issue: {issue}22857[#22857]) + +Plugin Repository GCS:: +* GCS Repository: Remove specifying credential file on disk {pull}24727[#24727] + +Plugin Repository S3:: +* S3 Repository: Cleanup deprecated settings {pull}24097[#24097] +* S3 Repository: Remove region setting {pull}22853[#22853] (issue: {issue}22758[#22758]) +* S3 Repository: Remove bucket auto create {pull}22846[#22846] (issue: {issue}22761[#22761]) +* S3 Repository: Remove env var and sysprop credentials support {pull}22842[#22842] +* Remove deprecated S3 settings {pull}24445[#24445] + +Plugins:: +* Make plugin loading stricter {pull}25405[#25405] + +Query DSL:: +* Remove deprecated `type` and `slop` field in `match` query {pull}26720[#26720] +* Remove several parse 
field deprecations in query builders {pull}26711[#26711] +* Remove deprecated parameters from `ids_query` {pull}26508[#26508] +* Refactor QueryStringQuery for 6.0 {pull}25646[#25646] (issue: {issue}25574[#25574]) +* Change `split_on_whitespace` default to false {pull}25570[#25570] (issue: {issue}25470[#25470]) +* Remove deprecated template query {pull}24577[#24577] (issue: {issue}19390[#19390]) +* Throw exception in scroll requests using `from` {pull}26235[#26235] (issue: {issue}9373[#9373]) +* Remove deprecated `minimum_number_should_match` in BoolQueryBuilder {pull}22416[#22416] +* Remove support for empty queries {pull}22092[#22092] (issue: {issue}17624[#17624]) +* Remove deprecated query names: in, geo_bbox, mlt, fuzzy_match and match_fuzzy {pull}21852[#21852] +* The `terms` query should always map to a Lucene `TermsQuery`. {pull}21786[#21786] +* Be strict when parsing values searching for booleans {pull}21555[#21555] (issue: {issue}21545[#21545]) +* Remove collect payloads parameter {pull}20385[#20385] + +REST:: +* IndexClosedException to return 400 rather than 403 {pull}25752[#25752] +* Remove comma-separated feature parsing for GetIndicesAction {pull}24723[#24723] (issue: {issue}24437[#24437]) +* Improve REST error handling when endpoint does not support HTTP verb, add OPTIONS support {pull}24437[#24437] (issues: {issue}15335[#15335], {issue}17916[#17916]) +* Remove ldjson support and document ndjson for bulk/msearch {pull}23049[#23049] (issue: {issue}23025[#23025]) +* Enable strict duplicate checks for all XContent types {pull}22225[#22225] (issues: {issue}19614[#19614], {issue}22073[#22073]) +* Enable strict duplicate checks for JSON content {pull}22073[#22073] (issue: {issue}19614[#19614]) +* Remove lenient stats parsing {pull}21417[#21417] (issues: {issue}20722[#20722], {issue}21410[#21410]) +* Remove allow unquoted JSON {pull}20388[#20388] (issues: {issue}17674[#17674], {issue}17801[#17801]) +* Remove FORCE version_type {pull}20377[#20377] (issue: {issue}19769[#19769]) + +Scripting:: +* remove lang url parameter from stored script requests {pull}25779[#25779] (issue: {issue}22887[#22887]) +* Disallow lang to be used with Stored Scripts {pull}25610[#25610] +* Remove Deprecated Script Settings {pull}24756[#24756] (issue: {issue}24532[#24532]) +* Scripting: Remove native scripts {pull}24726[#24726] (issue: {issue}19966[#19966]) +* Scripting: Remove file scripts {pull}24627[#24627] (issue: {issue}21798[#21798]) +* Make dates be ReadableDateTimes in scripts {pull}22948[#22948] (issue: {issue}22875[#22875]) +* Remove groovy scripting language {pull}21607[#21607] +* Remove script access to term statistics {pull}19462[#19462] (issue: {issue}19359[#19359]) + +Search:: +* Make `index` in TermsLookup mandatory {pull}25753[#25753] (issue: {issue}25750[#25750]) +* Removes FieldStats API {pull}25628[#25628] (issue: {issue}25577[#25577]) +* Remove deprecated fielddata_fields from search request {pull}25566[#25566] (issue: {issue}25537[#25537]) +* Removes deprecated fielddata_fields {pull}25537[#25537] (issue: {issue}19027[#19027]) +* ProfileResult and CollectorResult should print machine readable timing information {pull}22561[#22561] +* Remove indices query {pull}21837[#21837] (issue: {issue}17710[#17710]) +* Remove ignored type parameter in search_shards api {pull}21688[#21688] + +Sequence IDs:: +* Change certain replica failures not to fail the replica shard {pull}22874[#22874] (issue: {issue}10708[#10708]) + +Settings:: +* Settings: Remove shared setting property 
{pull}24728[#24728] +* Settings: Remove support for yaml and json config files {pull}24664[#24664] (issue: {issue}19391[#19391]) + +Shadow Replicas:: +* Remove shadow replicas {pull}23906[#23906] (issue: {issue}22024[#22024]) + +Similarities:: +* Similarity should accept dynamic settings when possible {pull}20339[#20339] (issue: {issue}6727[#6727]) + +[float] +=== Breaking Java Changes + +Aggregations:: +* Remove the unused SignificantTerms.compareTerm() method {pull}24714[#24714] +* Make SignificantTerms.Bucket an interface rather than an abstract class {pull}24670[#24670] (issue: {issue}24492[#24492]) +* Fix NPE when `values` is omitted on percentile_ranks agg {pull}26046[#26046] +* Make Terms.Bucket an interface rather than an abstract class {pull}24492[#24492] +* Compound order for histogram aggregations {pull}22343[#22343] (issues: {issue}14771[#14771], {issue}20003[#20003], {issue}23613[#23613]) + +Internal:: +* Collapses package structure for some bucket aggs {pull}25579[#25579] (issue: {issue}22868[#22868]) + +Java API:: +* Remove deprecated IdsQueryBuilder ctor {pull}25529[#25529] +* Removing unneeded getTookInMillis method {pull}23923[#23923] +* Java api: ActionRequestBuilder#execute to return a PlainActionFuture {pull}24415[#24415] (issues: {issue}24412[#24412], {issue}9201[#9201]) + +Java High Level REST Client:: +* Unify the result interfaces from get and search in Java client {pull}25361[#25361] (issue: {issue}16440[#16440]) +* Allow RestHighLevelClient to use plugins {pull}25024[#25024] + +Java REST Client:: +* Rename client artifacts {pull}25693[#25693] (issue: {issue}20248[#20248]) + +Network:: +* Simplify TransportAddress {pull}20798[#20798] + +Plugin Delete By Query:: +* Move DeleteByQuery and Reindex requests into core {pull}24578[#24578] + +Plugins:: +* Drop name from TokenizerFactory {pull}24869[#24869] + +Query DSL:: +* Remove QueryParseContext {pull}25486[#25486] +* Remove QueryParseContext from parsing QueryBuilders {pull}25448[#25448] + +REST:: +* Return index name and empty map for `/{index}/_alias` with no aliases {pull}25114[#25114] (issues: {issue}24723[#24723], {issue}25090[#25090]) + +[float] +=== Deprecations + +Index APIs:: +* Deprecated use of + in index expressions {pull}24585[#24585] (issue: {issue}24515[#24515]) + +Index Templates:: +* Restore deprecation warning for invalid match_mapping_type values {pull}22304[#22304] + +Indexed Scripts/Templates:: +* Scripting: Deprecate stored search template apis {pull}25437[#25437] (issue: {issue}24596[#24596]) + +Internal:: +* Deprecate XContentType auto detection methods in XContentFactory {pull}22181[#22181] (issue: {issue}19388[#19388]) + +Percolator:: +* Deprecate percolate query's document_type parameter. 
{pull}25199[#25199] + +Plugins:: +* Plugins: Add backcompat for sha1 checksums {pull}26748[#26748] (issue: {issue}26746[#26746]) + +Scripting:: +* Scripting: Change keys for inline/stored scripts to source/id {pull}25127[#25127] +* Scripting: Deprecate native scripts {pull}24692[#24692] (issue: {issue}19966[#19966]) +* Scripting: Deprecate index lookup {pull}24691[#24691] (issue: {issue}19359[#19359]) +* Deprecate Fine Grain Settings for Scripts {pull}24573[#24573] (issue: {issue}24532[#24532]) +* Scripting: Deprecate file script settings {pull}24555[#24555] (issue: {issue}21798[#21798]) +* Scripting: Deprecate file scripts {pull}24552[#24552] (issue: {issue}21798[#21798]) + +Settings:: +* Settings: Update settings deprecation from yml to yaml {pull}24663[#24663] (issue: {issue}19391[#19391]) +* Deprecate settings in .yml and .json {pull}24059[#24059] (issue: {issue}19391[#19391]) + +Tribe Node:: +* Deprecate tribe service {pull}24598[#24598] (issue: {issue}24581[#24581]) + +[float] +=== New Features + +Aggregations:: +* SignificantText aggregation - like significant_terms, but for text {pull}24432[#24432] (issue: {issue}23674[#23674]) + +Analysis:: +* Expose simplepattern and simplepatternsplit tokenizers {pull}25159[#25159] (issue: {issue}23363[#23363]) +* Parse synonyms with the same analysis chain {pull}8049[#8049] (issue: {issue}7199[#7199]) + +Core:: +* Enable index-time sorting {pull}24055[#24055] (issue: {issue}6720[#6720]) + +Internal:: +* Automatically adjust search threadpool queue_size {pull}23884[#23884] (issue: {issue}3890[#3890]) + +Mapping:: +* Add new ip_range field type {pull}24433[#24433] + +Parent/Child:: +* Move parent_id query to the parent-join module {pull}25072[#25072] (issue: {issue}20257[#20257]) +* Introduce ParentJoinFieldMapper, a field mapper that creates parent/child relation within documents of the same index {pull}24978[#24978] (issue: {issue}20257[#20257]) + +Plugin Analysis ICU:: +* Add ICUCollationFieldMapper {pull}24126[#24126] + +Search:: +* Automatically early terminate search query based on index sorting {pull}24864[#24864] (issue: {issue}6720[#6720]) + +Sequence IDs:: +* Add a scheduled translog retention check {pull}25622[#25622] (issues: {issue}10708[#10708], {issue}25294[#25294]) +* Initialize sequence numbers on a shrunken index {pull}25321[#25321] (issue: {issue}10708[#10708]) +* Initialize primary term for shrunk indices {pull}25307[#25307] (issue: {issue}10708[#10708]) +* Introduce translog size and age based retention policies {pull}25147[#25147] (issue: {issue}10708[#10708]) + +Stats:: +* Adds nodes usage API to monitor usages of actions {pull}24169[#24169] + +Task Manager:: +* Task Management [ISSUE] {pull}15117[#15117] + +Upgrade API:: +* TemplateUpgraders should be called during rolling restart {pull}25263[#25263] (issues: {issue}24379[#24379], {issue}24680[#24680]) + +[float] +=== Enhancements + +Aggregations:: +* Add strict parsing of aggregation ranges {pull}25769[#25769] +* Adds rewrite phase to aggregations {pull}25495[#25495] (issue: {issue}17676[#17676]) +* Tweak AggregatorBase.addRequestCircuitBreakerBytes {pull}25162[#25162] (issue: {issue}24511[#24511]) +* Add superset size to Significant Term REST response {pull}24865[#24865] +* Add document count to Matrix Stats aggregation response {pull}24776[#24776] +* Adds an implementation of LogLogBeta for the cardinality aggregation {pull}22323[#22323] (issue: {issue}22230[#22230]) +* Support distance units in GeoHashGrid aggregation precision {pull}26291[#26291] (issue: 
{issue}5042[#5042]) +* Reject multiple methods in `percentiles` aggregation {pull}26163[#26163] (issue: {issue}26095[#26095]) +* Use `global_ordinals_hash` execution mode when sorting by sub aggregations. {pull}26014[#26014] (issue: {issue}24359[#24359]) +* Add a specialized deferring collector for terms aggregator {pull}25190[#25190] +* Agg builder accessibility fixes {pull}24323[#24323] +* Remove support for the include/pattern syntax. {pull}23141[#23141] (issue: {issue}22933[#22933]) +* Promote longs to doubles when a terms agg mixes decimal and non-decimal numbers {pull}22449[#22449] (issue: {issue}22232[#22232]) + +Allocation:: +* Adjust status on bad allocation explain requests {pull}25503[#25503] (issue: {issue}25458[#25458]) +* Promote replica on the highest version node {pull}25277[#25277] (issue: {issue}10708[#10708]) + +Analysis:: +* [Analysis] Support normalizer in request param {pull}24767[#24767] (issue: {issue}23347[#23347]) +* Enforce validation for PathHierarchy tokenizer {pull}23510[#23510] +* [analysis-icu] Allow setting unicodeSetFilter {pull}20814[#20814] (issue: {issue}20820[#20820]) +* Match- and MultiMatchQueryBuilder should only allow setting analyzer on string values {pull}23684[#23684] (issue: {issue}21665[#21665]) + +Bulk:: +* Simplify bulk request execution {pull}20109[#20109] + +CAT API:: +* expand `/_cat/nodes` to return information about hard drive {pull}21775[#21775] (issue: {issue}21679[#21679]) + +CRUD:: +* Added validation for upsert request {pull}24282[#24282] (issue: {issue}16671[#16671]) + +Circuit Breakers:: +* ScriptService: Replace max compilation per minute setting with max compilation rate {pull}26399[#26399] + +Cluster:: +* Validate a joining node's version with version of existing cluster nodes {pull}25808[#25808] +* Switch indices read-only if a node runs out of disk space {pull}25541[#25541] (issue: {issue}24299[#24299]) +* Add a cluster block that allows to delete indices that are read-only {pull}24678[#24678] +* Separate publishing from applying cluster states {pull}24236[#24236] +* Adds cluster state size to /_cluster/state response {pull}23440[#23440] (issue: {issue}3415[#3415]) + +Core:: +* Allow `InputStreamStreamInput` array size validation where applicable {pull}26692[#26692] +* Refactor bootstrap check results and error messages {pull}26637[#26637] +* Add BootstrapContext to expose settings and recovered state to bootstrap checks {pull}26628[#26628] +* Unit testable index creation task on MetaDataCreateIndexService {pull}25961[#25961] +* Ignore .DS_Store files on macOS {pull}27108[#27108] (issue: {issue}23982[#23982]) +* Add max file size bootstrap check {pull}25974[#25974] +* Add compatibility versions to main action response {pull}25799[#25799] +* Index ids in binary form. {pull}25352[#25352] (issues: {issue}18154[#18154], {issue}24615[#24615]) +* Explicitly reject duplicate data paths {pull}25178[#25178] +* Use SPI in High Level Rest Client to load XContent parsers {pull}25097[#25097] +* Upgrade to lucene-7.0.0-snapshot-a0aef2f {pull}24775[#24775] +* Speed up PK lookups at index time. 
{pull}19856[#19856] +* Use Java 9 FilePermission model {pull}26302[#26302] (issue: {issue}21534[#21534]) +* Add friendlier message on bad keystore permissions {pull}26284[#26284] +* Epoch millis and second formats accept float implicitly {pull}26119[#26119] (issue: {issue}14641[#14641]) +* Remove connect SocketPermissions from core {pull}22797[#22797] +* Add repository-url module and move URLRepository {pull}22752[#22752] (issue: {issue}22116[#22116]) +* Remove accept SocketPermissions from core {pull}22622[#22622] (issue: {issue}22116[#22116]) +* Move IfConfig.logIfNecessary call into bootstrap {pull}22455[#22455] (issue: {issue}22116[#22116]) +* Remove artificial default processors limit {pull}20874[#20874] (issue: {issue}20828[#20828]) +* Simplify write failure handling {pull}19105[#19105] (issue: {issue}20109[#20109]) +* Improve bootstrap checks error messages {pull}24548[#24548] + +Discovery:: +* Allow plugins to validate cluster-state on join {pull}26595[#26595] + +Engine:: +* Add refresh stats tracking for realtime get {pull}25052[#25052] (issue: {issue}24806[#24806]) +* Introducing a translog deletion policy {pull}24950[#24950] +* Fill missing sequence IDs up to max sequence ID when recovering from store {pull}24238[#24238] (issue: {issue}10708[#10708]) +* Use sequence numbers to identify out of order delivery in replicas & recovery {pull}24060[#24060] (issue: {issue}10708[#10708]) +* Add replica ops with version conflict to translog {pull}22626[#22626] +* Clarify global checkpoint recovery {pull}21934[#21934] (issue: {issue}21254[#21254]) +* Move the IndexDeletionPolicy to be engine internal {pull}24930[#24930] (issue: {issue}10708[#10708]) + +Exceptions:: +* IllegalStateException: Only duplicated jar instead of classpath {pull}24953[#24953] + +Highlighting:: +* Picks offset source for the unified highlighter directly from the es mapping {pull}25747[#25747] (issue: {issue}25699[#25699]) + +Index APIs:: +* Let primary own its replication group {pull}25692[#25692] (issue: {issue}25485[#25485]) +* Create index request should return the index name {pull}25139[#25139] (issue: {issue}23044[#23044]) + +Index Templates:: +* Fix error message for a put index template request without index_patterns {pull}27102[#27102] (issue: {issue}27100[#27100]) + +Ingest:: +* Add Ingest-Processor specific Rest Endpoints & Add Grok endpoint {pull}25059[#25059] (issue: {issue}24725[#24725]) +* Port support for commercial GeoIP2 databases from Logstash. {pull}24889[#24889] +* add `exclude_keys` option to KeyValueProcessor {pull}24876[#24876] (issue: {issue}23856[#23856]) +* Allow removing multiple fields in ingest processor {pull}24750[#24750] (issue: {issue}24622[#24622]) +* Add target_field parameter to ingest processors {pull}24133[#24133] (issues: {issue}23228[#23228], {issue}23682[#23682]) + +Inner Hits:: +* Reuse inner hit query weight {pull}24571[#24571] (issue: {issue}23917[#23917]) + +Internal:: +* TemplateUpgradeService should only run on the master {pull}27294[#27294] +* Cleanup IndexFieldData visibility {pull}25900[#25900] +* Bump the min compat version to 5.6.0 {pull}25805[#25805] +* "shard started" should show index and shard ID {pull}25157[#25157] +* Break out clear scroll logic from TransportClearScrollAction {pull}25125[#25125] (issue: {issue}25094[#25094]) +* Add helper methods to TransportActionProxy to identify proxy actions and requests {pull}25124[#25124] +* Add remote cluster infrastructure to fetch discovery nodes. 
{pull}25123[#25123] (issue: {issue}25094[#25094]) +* Add the ability to set eager_global_ordinals in the new parent-join field {pull}25019[#25019] +* Disallow multiple parent-join fields per mapping {pull}25002[#25002] +* Remove the need for _UNRELEASED suffix in versions {pull}24798[#24798] (issue: {issue}24768[#24768]) +* Optimize the order of bytes in uuids for better compression. {pull}24615[#24615] (issue: {issue}18209[#18209]) +* Prevent cluster internal `ClusterState.Custom` impls to leak to a client {pull}26232[#26232] +* Use holder pattern for lazy deprecation loggers {pull}26218[#26218] (issue: {issue}26210[#26210]) +* Allow `ClusterState.Custom` to be created on initial cluster states {pull}26144[#26144] +* Try to convince the JVM not to lose stacktraces {pull}24426[#24426] (issue: {issue}24376[#24376]) +* Make document write requests immutable {pull}23038[#23038] +* Add assertions enabled helper {pull}24834[#24834] + +Java API:: +* Always Accumulate Transport Exceptions {pull}25017[#25017] (issue: {issue}23099[#23099]) + +Java High Level REST Client:: +* [DOCS] restructure java clients docs pages {pull}25517[#25517] +* Use SPI in High Level Rest Client to load XContent parsers {pull}25098[#25098] (issues: {issue}25024[#25024], {issue}25097[#25097]) +* Add support for clear scroll to high level REST client {pull}25038[#25038] +* Add search scroll method to high level REST client {pull}24938[#24938] (issue: {issue}23331[#23331]) +* Add search method to high level REST client {pull}24796[#24796] (issues: {issue}24794[#24794], {issue}24795[#24795]) +* Make RestHighLevelClient Closeable and simplify its creation {pull}26180[#26180] (issue: {issue}26086[#26086]) +* Add info method to High Level Rest client {pull}23350[#23350] +* Add support for named xcontent parsers to high level REST client {pull}23328[#23328] +* Add BulkRequest support to High Level Rest client {pull}23312[#23312] +* Add UpdateRequest support to High Level Rest client {pull}23266[#23266] +* Add delete API to the High Level Rest Client {pull}23187[#23187] +* Add Index API to High Level Rest Client {pull}23040[#23040] +* Add get/exists method to RestHighLevelClient {pull}22706[#22706] +* Add fromxcontent methods to delete response {pull}22680[#22680] (issue: {issue}22229[#22229]) +* Add REST high level client gradle submodule and first simple method {pull}22371[#22371] +* Add doc_count to ParsedMatrixStats {pull}24952[#24952] (issue: {issue}24776[#24776]) +* Add fromXContent method to ClearScrollResponse {pull}24909[#24909] +* ClearScrollRequest to implement ToXContentObject {pull}24907[#24907] +* SearchScrollRequest to implement ToXContentObject {pull}24906[#24906] (issue: {issue}3889[#3889]) +* Add aggs parsers for high level REST Client {pull}24824[#24824] (issues: {issue}23965[#23965], {issue}23973[#23973], {issue}23974[#23974], {issue}24085[#24085], {issue}24160[#24160], {issue}24162[#24162], {issue}24182[#24182], {issue}24183[#24183], {issue}24208[#24208], {issue}24213[#24213], {issue}24239[#24239], {issue}24284[#24284], {issue}24312[#24312], {issue}24330[#24330], {issue}24365[#24365], {issue}24371[#24371], {issue}24442[#24442], {issue}24521[#24521], {issue}24524[#24524], {issue}24564[#24564], {issue}24583[#24583], {issue}24589[#24589], {issue}24648[#24648], {issue}24667[#24667], {issue}24675[#24675], {issue}24682[#24682], {issue}24700[#24700], {issue}24706[#24706], {issue}24717[#24717], {issue}24720[#24720], {issue}24738[#24738], {issue}24746[#24746], {issue}24789[#24789], {issue}24791[#24791], 
{issue}24794[#24794], {issue}24796[#24796], {issue}24822[#24822]) + +Java REST Client:: +* Shade external dependencies in the rest client jar {pull}25780[#25780] (issue: {issue}25208[#25208]) +* RestClient uses system properties and system default SSLContext {pull}25757[#25757] (issue: {issue}23231[#23231]) +* Wrap rest httpclient with doPrivileged blocks {pull}22603[#22603] (issue: {issue}22116[#22116]) + +Logging:: +* Prevent excessive disk consumption by log files {pull}25660[#25660] +* Use LRU set to reduce repeat deprecation messages {pull}25474[#25474] (issue: {issue}25457[#25457]) + +Mapping:: +* More efficient encoding of range fields. {pull}26470[#26470] (issue: {issue}26443[#26443]) +* Don't detect source's XContentType in DocumentParser.parseDocument() {pull}26880[#26880] +* Better validation of `copy_to`. {pull}25983[#25983] +* Optimize `terms` queries on `ip` addresses to use a `PointInSetQuery` whenever possible. {pull}25669[#25669] (issue: {issue}25667[#25667]) +* Loosen the restrictions on disabling _all in 6.x {pull}26259[#26259] +* Date detection should not rely on a hardcoded set of characters. {pull}22171[#22171] (issue: {issue}1694[#1694]) +* Identify documents by their `_id`. {pull}24460[#24460] + +Network:: +* Add additional low-level logging handler {pull}26887[#26887] +* Unwrap causes when maybe dying {pull}26884[#26884] +* Move TransportStats accounting into TcpTransport {pull}25251[#25251] +* Simplify connection closing and cleanups in TcpTransport {pull}25250[#25250] +* Disable the Netty recycler in the client {pull}24793[#24793] (issues: {issue}22452[#22452], {issue}24721[#24721]) +* Remove Netty logging hack {pull}24653[#24653] (issues: {issue}24469[#24469], {issue}5624[#5624], {issue}6568[#6568], {issue}6696[#6696]) +* Isolate SocketPermissions to Netty {pull}23057[#23057] +* Wrap netty accept/connect ops with doPrivileged {pull}22572[#22572] (issue: {issue}22116[#22116]) +* Replace Socket, ServerSocket, and HttpServer usages in tests with mocksocket versions {pull}22287[#22287] (issue: {issue}22116[#22116]) + +Packaging:: +* Remove memlock suggestion from systemd service {pull}25979[#25979] +* Set address space limit in systemd service file {pull}25975[#25975] +* Version option should display if snapshot {pull}25970[#25970] +* Ignore JVM options before checking Java version {pull}25969[#25969] +* Also skip JAVA_TOOL_OPTIONS on Windows {pull}25968[#25968] +* Introduce elasticsearch-env for Windows {pull}25958[#25958] +* Introduce elasticsearch-env {pull}25815[#25815] (issue: {issue}20286[#20286]) +* Stop exporting HOSTNAME from scripts {pull}25807[#25807] +* Set number of processes in systemd unit file {pull}24970[#24970] (issue: {issue}20874[#20874]) + +Parent/Child:: +* Remove ParentJoinFieldSubFetchPhase {pull}25550[#25550] (issue: {issue}25363[#25363]) +* Support parent id being specified as number in the _source {pull}25547[#25547] + +Percolator:: +* Store the QueryBuilder's Writable representation instead of its XContent representation {pull}25456[#25456] +* Add support for selecting percolator query candidate matches containing wildcard / prefix queries {pull}25351[#25351] + +Plugin Discovery EC2:: +* Read ec2 discovery address from aws instance tags {pull}22743[#22743] (issue: {issue}22566[#22566]) + +Plugin Lang Painless:: +* Allow Custom Whitelists in Painless {pull}25557[#25557] +* Update Painless to Allow Augmentation from Any Class {pull}25360[#25360] +* Add Needs Methods to Painless Script Context Factories {pull}25267[#25267] +* Support Script 
Context Stateful Factory in Painless {pull}25233[#25233] +* Generate Painless Factory for Creating Script Instances {pull}25120[#25120] +* Update Painless to Use New Script Contexts {pull}25015[#25015] +* Optimize instance creation in LambdaBootstrap {pull}24618[#24618] +* Make Painless Compiler Use an Instance Per Context {pull}24972[#24972] +* Make PainlessScript An Interface {pull}24966[#24966] + +Plugin Repository GCS:: +* GCS Repository: Add secure storage of credentials {pull}24697[#24697] + +Plugin Repository HDFS:: +* Add permission checks before reading from HDFS stream {pull}26716[#26716] (issue: {issue}26714[#26714]) +* Add doPrivilege blocks for socket connect ops in repository-hdfs {pull}22793[#22793] (issue: {issue}22116[#22116]) +* Add Kerberos support for Repo HDFS plugin [ISSUE] {pull}21990[#21990] + +Plugin Repository S3:: +* S3 Repository: Add back repository level credentials {pull}24609[#24609] + +Plugins:: +* Adjust SHA-512 supported format on plugin install {pull}27093[#27093] +* Move tribe to a module {pull}25778[#25778] +* Plugins can register pre-configured char filters {pull}25000[#25000] (issue: {issue}23658[#23658]) +* Add purge option to remove plugin CLI {pull}24981[#24981] +* Allow plugins to register pre-configured tokenizers {pull}24751[#24751] (issues: {issue}24223[#24223], {issue}24572[#24572]) +* Move ReindexAction class to core {pull}24684[#24684] (issue: {issue}24578[#24578]) +* Make PreConfiguredTokenFilter harder to misuse {pull}24572[#24572] (issue: {issue}23658[#23658]) +* Plugins: Remove leniency for missing plugins dir {pull}24173[#24173] +* Add doPrivilege blocks for socket connect operations in plugins {pull}22534[#22534] (issue: {issue}22116[#22116]) + +Query DSL:: +* Make slop optional when parsing `span_near` query {pull}25677[#25677] (issue: {issue}25642[#25642]) +* Require a field when a `seed` is provided to the `random_score` function. 
{pull}25594[#25594] (issue: {issue}25240[#25240]) +* Add support for auto_generate_synonyms_phrase_query in match_query, multi_match_query, query_string and simple_query_string {pull}23147[#23147] + +REST:: +* Cat shards bytes {pull}26952[#26952] +* Refactor PathTrie and RestController to use a single trie for all methods {pull}25459[#25459] (issue: {issue}24437[#24437]) +* Make ObjectParser support string to boolean conversion {pull}24668[#24668] (issue: {issue}21802[#21802]) + +Recovery:: +* Introduce a History UUID as a requirement for ops based recovery {pull}26577[#26577] (issue: {issue}10708[#10708]) +* Goodbye, Translog Views {pull}25962[#25962] +* Disallow multiple concurrent recovery attempts for same target shard {pull}25428[#25428] +* Live primary-replica resync (no rollback) {pull}24841[#24841] (issue: {issue}10708[#10708]) +* Peer Recovery: remove maxUnsafeAutoIdTimestamp hand off {pull}24243[#24243] (issue: {issue}24149[#24149]) +* Introduce sequence-number-based recovery {pull}22484[#22484] (issue: {issue}10708[#10708]) + +Scripting:: +* Scripting: Rename SearchScript.needsScores to needs_score {pull}25235[#25235] +* Scripting: Add optional context parameter to put stored script requests {pull}25014[#25014] +* Add New Security Script Settings {pull}24637[#24637] (issue: {issue}24532[#24532]) +* Add StatefulFactoryType as optional intermediate factory in script contexts {pull}24974[#24974] (issue: {issue}20426[#20426]) +* Make contexts available to ScriptEngine construction {pull}24896[#24896] +* Make ScriptEngine.compile generic on the script context {pull}24873[#24873] +* Add instance and compiled classes to script contexts {pull}24868[#24868] + +Search:: +* Add soft limit on allowed number of script fields in request {pull}26598[#26598] (issue: {issue}26390[#26390]) +* Add a soft limit for the number of requested doc-value fields {pull}26574[#26574] (issue: {issue}26390[#26390]) +* Rewrite search requests on the coordinating nodes {pull}25814[#25814] (issue: {issue}25791[#25791]) +* Ensure query resources are fetched asynchronously during rewrite {pull}25791[#25791] +* Introduce a new Rewriteable interface to streamline rewriting {pull}25788[#25788] +* Reduce the scope of `QueryRewriteContext` {pull}25787[#25787] +* Reduce the overhead of timeouts and low-level search cancellation. {pull}25776[#25776] +* Reduce profiling overhead. {pull}25772[#25772] (issue: {issue}24799[#24799]) +* Prevent `can_match` requests from sending to incompatible nodes {pull}25705[#25705] (issue: {issue}25704[#25704]) +* Add a shard filter search phase to pre-filter shards based on query rewriting {pull}25658[#25658] +* Ensure we rewrite common queries to `match_none` if possible {pull}25650[#25650] +* Limit the number of concurrent shard requests per search request {pull}25632[#25632] +* Add cluster name validation to RemoteClusterConnection {pull}25568[#25568] +* Speed up sorted scroll when the index sort matches the search sort {pull}25138[#25138] (issue: {issue}6720[#6720]) +* Leverage scorerSupplier when applicable. 
{pull}25109[#25109] +* Add Cross Cluster Search support for scroll searches {pull}25094[#25094] +* Track EWMA of task execution time in search threadpool executor {pull}24989[#24989] (issue: {issue}24915[#24915]) +* Query range fields by doc values when they are expected to be more efficient than points {pull}24823[#24823] (issue: {issue}24314[#24314]) +* Search: Fairer balancing when routing searches by session ID {pull}24671[#24671] (issue: {issue}24642[#24642]) +* Add parsing from xContent to Suggest {pull}22903[#22903] +* Add parsing from xContent to ShardSearchFailure {pull}22699[#22699] +* Eliminate array access in tight loops when profiling is enabled. {pull}24959[#24959] +* Support Multiple Inner Hits on a Field Collapse Request {pull}24517[#24517] +* Expand cross cluster search indices for search requests to the concrete index or to its aliases {pull}24502[#24502] + +Search Templates:: +* Add max concurrent searches to multi template search {pull}24255[#24255] (issues: {issue}20912[#20912], {issue}21907[#21907]) + +Sequence IDs:: +* Roll translog generation on primary promotion {pull}27313[#27313] +* Restoring from snapshot should force generation of a new history uuid {pull}26694[#26694] (issues: {issue}10708[#10708], {issue}26544[#26544], {issue}26557[#26557], {issue}26577[#26577]) +* Add global checkpoint tracking on the primary {pull}26666[#26666] (issue: {issue}26591[#26591]) +* Introduce global checkpoint background sync {pull}26591[#26591] (issues: {issue}26573[#26573], {issue}26630[#26630], {issue}26666[#26666]) +* Move `UNASSIGNED_SEQ_NO` and `NO_OPS_PERFORMED` to `SequenceNumbers` {pull}26494[#26494] (issue: {issue}10708[#10708]) +* Move primary term from ReplicationRequest to ConcreteShardRequest {pull}25822[#25822] +* Add reason to global checkpoint updates on replica {pull}25612[#25612] (issue: {issue}10708[#10708]) +* Introduce primary/replica mode for GlobalCheckPointTracker {pull}25468[#25468] +* Throw back replica local checkpoint on new primary {pull}25452[#25452] (issues: {issue}10708[#10708], {issue}25355[#25355]) +* Update global checkpoint when increasing primary term on replica {pull}25422[#25422] (issues: {issue}10708[#10708], {issue}25355[#25355]) +* Enable a long translog retention policy by default {pull}25294[#25294] (issues: {issue}10708[#10708], {issue}25147[#25147]) +* Introduce primary context {pull}25122[#25122] (issues: {issue}10708[#10708], {issue}25355[#25355]) +* Block older operations on primary term transition {pull}24779[#24779] (issue: {issue}10708[#10708]) +* Block global checkpoint advances when recovering {pull}24404[#24404] (issue: {issue}10708[#10708]) +* Add primary term to doc write response {pull}24171[#24171] (issue: {issue}10708[#10708]) +* Preserve multiple translog generations {pull}24015[#24015] (issue: {issue}10708[#10708]) +* Introduce translog generation rolling {pull}23606[#23606] (issue: {issue}10708[#10708]) +* Replicate write failures {pull}23314[#23314] +* Introduce sequence-number-aware translog {pull}22822[#22822] (issue: {issue}10708[#10708]) +* Introduce translog no-op {pull}22291[#22291] (issue: {issue}10708[#10708]) +* Tighten sequence numbers recovery {pull}22212[#22212] (issue: {issue}10708[#10708]) +* Add BWC layer to seq no infra and enable BWC tests {pull}22185[#22185] (issue: {issue}21670[#21670]) +* Add internal _primary_term doc values field, fix _seq_no indexing {pull}21637[#21637] (issues: {issue}10708[#10708], {issue}21480[#21480]) +* Add global checkpoint to translog checkpoints {pull}21254[#21254] 
+* Sequence numbers commit data for Lucene uses Iterable interface {pull}20793[#20793] (issue: {issue}10708[#10708]) +* Simplify GlobalCheckpointService and properly hook it for cluster state updates {pull}20720[#20720] +* Fill gaps on primary promotion {pull}24945[#24945] (issue: {issue}10708[#10708]) +* Introduce clean transition on primary promotion {pull}24925[#24925] (issue: {issue}10708[#10708]) +* Guarantee that translog generations are seqNo conflict free {pull}24825[#24825] (issues: {issue}10708[#10708], {issue}24779[#24779]) +* Inline global checkpoints {pull}24513[#24513] (issue: {issue}10708[#10708]) + +Settings:: +* Add disk threshold settings validation {pull}25600[#25600] (issue: {issue}25560[#25560]) +* Enable cross-setting validation {pull}25560[#25560] (issue: {issue}25541[#25541]) +* Validate `transport.profiles.*` settings {pull}25508[#25508] +* Cleanup network / transport related settings {pull}25489[#25489] +* Emit settings deprecation logging at most once {pull}25457[#25457] +* IndexMetaData: Introduce internal format index setting {pull}25292[#25292] +* Persist created keystore on startup unless keystore is present {pull}26253[#26253] (issue: {issue}26126[#26126]) +* Settings: Add keystore.seed auto generated secure setting {pull}26149[#26149] +* Settings: Add keystore creation to add commands {pull}26126[#26126] + +Snapshot/Restore:: +* Fixed references to Multi Index Syntax {pull}27283[#27283] +* Improves snapshot logging and snapshot deletion error handling {pull}25264[#25264] +* Enhances get snapshots API to allow retrieving repository index only {pull}24477[#24477] (issue: {issue}24288[#24288]) + +Stats:: +* Update `IndexShard#refreshMetric` via a `ReferenceManager.RefreshListener` {pull}25083[#25083] (issues: {issue}24806[#24806], {issue}25052[#25052]) +* Expose disk usage estimates in nodes stats {pull}22081[#22081] (issue: {issue}8686[#8686]) + +Store:: +* Remove support for lucene versions without checksums {pull}24021[#24021] + +Suggesters:: +* Remove deprecated _suggest endpoint {pull}22203[#22203] (issue: {issue}20305[#20305]) + +Task Manager:: +* Add descriptions to bulk tasks {pull}22059[#22059] (issue: {issue}21768[#21768]) + +Translog:: +* Translog file recovery should not rely on lucene commits {pull}25005[#25005] (issue: {issue}24950[#24950]) + +[float] +=== Bug Fixes + +Aggregations:: +* Do not delegate a null scorer to LeafBucketCollectors {pull}26747[#26747] (issue: {issue}26611[#26611]) +* Create weights lazily in filter and filters aggregation {pull}26983[#26983] +* Fix IndexOutOfBoundsException in histograms for NaN doubles (#26787) {pull}26856[#26856] (issue: {issue}26787[#26787]) +* Scripted_metric _agg parameter disappears if params are provided {pull}19863[#19863] (issue: {issue}19768[#19768]) +* Fixes array out of bounds for value count agg {pull}26038[#26038] (issue: {issue}17379[#17379]) +* Aggregations bug: Significant_text fails on arrays of text. 
{pull}25030[#25030] (issue: {issue}25029[#25029]) +* Check bucket metric ages point to a multi bucket agg {pull}26215[#26215] (issue: {issue}25775[#25775]) +* Terms aggregation should remap global ordinal buckets when a sub-aggregator is used to sort the terms {pull}24941[#24941] (issue: {issue}24788[#24788]) +* Correctly set doc_count when MovAvg "predicts" values on existing buckets {pull}24892[#24892] (issue: {issue}24327[#24327]) +* DateHistogram: Fix `extended_bounds` with `offset` {pull}23789[#23789] (issue: {issue}23776[#23776]) +* Fix ArrayIndexOutOfBoundsException when no ranges are specified in the query {pull}23241[#23241] (issue: {issue}22881[#22881]) + +Aliases:: +* mget with an alias shouldn't ignore alias routing {pull}25697[#25697] (issue: {issue}25696[#25696]) +* GET aliases should 404 if aliases are missing {pull}25043[#25043] (issue: {issue}24644[#24644]) + +Allocation:: +* Fix DiskThresholdMonitor flood warning {pull}26204[#26204] (issue: {issue}26201[#26201]) +* Allow wildcards for shard IP filtering {pull}26187[#26187] (issues: {issue}22591[#22591], {issue}26184[#26184]) + +Analysis:: +* Pre-configured shingle filter should disable graph analysis {pull}25853[#25853] (issue: {issue}25555[#25555]) +* PatternAnalyzer should lowercase wildcard queries when `lowercase` is true. {pull}24967[#24967] + +CAT API:: +* Fix NPE for /_cat/indices when no primary shard {pull}26953[#26953] (issue: {issue}26942[#26942]) + +CRUD:: +* Serialize and expose timeout of acknowledged requests in REST layer {pull}26189[#26189] (issue: {issue}26213[#26213]) +* Fix silent loss of last command to _bulk and _msearch due to missing newline {pull}25740[#25740] (issue: {issue}7601[#7601]) + +Cache:: +* Reduce the default number of cached queries. {pull}26949[#26949] (issue: {issue}26938[#26938]) +* fix bug of weight computation {pull}24856[#24856] + +Circuit Breakers:: +* Checks the circuit breaker before allocating bytes for a new big array {pull}25010[#25010] (issue: {issue}24790[#24790]) + +Cluster:: +* Register setting `cluster.indices.tombstones.size` {pull}26193[#26193] (issue: {issue}26191[#26191]) + +Core:: +* Correctly encode warning headers {pull}27269[#27269] (issue: {issue}27244[#27244]) +* Fix cache compute if absent for expired entries {pull}26516[#26516] +* Timed runnable should delegate to abstract runnable {pull}27095[#27095] (issue: {issue}27069[#27069]) +* Stop invoking non-existent syscall {pull}27016[#27016] (issue: {issue}20179[#20179]) +* MetaData Builder doesn't properly prevent an alias with the same name as an index {pull}26804[#26804] +* Release operation permit on thread-pool rejection {pull}25930[#25930] (issue: {issue}25863[#25863]) +* Node should start up despite a lingering `.es_temp_file` {pull}21210[#21210] (issue: {issue}21007[#21007]) +* Fix cache expire after access {pull}24546[#24546] + +Dates:: +* Fix typo in date format {pull}26503[#26503] (issue: {issue}26500[#26500]) + +Discovery:: +* MasterNodeChangePredicate should use the node instance to detect master change {pull}25877[#25877] (issue: {issue}25471[#25471]) + +Engine:: +* Die with dignity while merging {pull}27265[#27265] (issue: {issue}19272[#19272]) +* Engine - do not index operations with seq# lower than the local checkpoint into lucene {pull}25827[#25827] (issue: {issue}25592[#25592]) + +Geo:: +* Fix typo in GeoUtils#isValidLongitude {pull}25121[#25121] + +Highlighting:: +* Fix percolator highlight sub fetch phase to not highlight query twice {pull}26622[#26622] +* 
FastVectorHighlighter should not cache the field query globally {pull}25197[#25197] (issue: {issue}25171[#25171]) +* Highlighters: Fix MultiPhrasePrefixQuery rewriting {pull}25103[#25103] (issue: {issue}25088[#25088]) +* Fix nested query highlighting {pull}26305[#26305] (issue: {issue}26230[#26230]) + +Index APIs:: +* Shrink API should ignore templates {pull}25380[#25380] (issue: {issue}25035[#25035]) +* Rollover max docs should only count primaries {pull}24977[#24977] (issue: {issue}24217[#24217]) +* Validates updated settings on closed indices {pull}24487[#24487] (issue: {issue}23787[#23787]) + +Ingest:: +* date processor should not fail if timestamp is specified as json number {pull}26986[#26986] (issue: {issue}26967[#26967]) +* date_index_name processor should not fail if timestamp is specified as json number {pull}26910[#26910] (issue: {issue}26890[#26890]) +* Sort Processor does not have proper behavior with targetField {pull}25237[#25237] (issue: {issue}24133[#24133]) +* fix grok's pattern parsing to validate pattern names in expression {pull}25063[#25063] (issue: {issue}22831[#22831]) +* Remove support for Visio and potm files {pull}22079[#22079] (issue: {issue}22077[#22077]) +* Fix floating-point error when DateProcessor parses UNIX {pull}24947[#24947] +* add option for _ingest.timestamp to use new ZonedDateTime (5.x backport) {pull}24030[#24030] (issues: {issue}23168[#23168], {issue}23174[#23174]) + +Inner Hits:: +* Do not allow inner hits that fetch _source and have a non nested object field as parent {pull}25749[#25749] (issue: {issue}25315[#25315]) +* When fetching nested inner hits only access stored fields when needed {pull}25864[#25864] +* If size / offset are out of bounds just do a plain count {pull}20556[#20556] (issue: {issue}20501[#20501]) +* Fix Source filtering in new field collapsing feature {pull}24068[#24068] (issue: {issue}24063[#24063]) + +Internal:: +* Bump version to 6.0.1 [OPEN] {pull}27386[#27386] +* `IndexShard.routingEntry` should only be updated once all internal state is ready {pull}26776[#26776] +* Catch exceptions and inform handler in RemoteClusterConnection#collectNodes {pull}26725[#26725] (issue: {issue}26700[#26700]) +* Internal: Add versionless alias for rest client codebase in policy files {pull}26521[#26521] +* Upgrade Lucene to version 7.0.1 {pull}26926[#26926] +* Fix BytesReferenceStreamInput#skip with offset {pull}25634[#25634] +* Fix race condition in RemoteClusterConnection node supplier {pull}25432[#25432] +* Initialise empty lists in BaseTaskResponse constructor {pull}25290[#25290] +* Extract a common base class for scroll executions {pull}24979[#24979] (issue: {issue}16555[#16555]) +* Obey lock order if working with store to get metadata snapshots {pull}24787[#24787] (issue: {issue}24481[#24481]) +* Fix Version based BWC and set correct minCompatVersion {pull}24732[#24732] +* Fix `_field_caps` serialization in order to support cross cluster search {pull}24722[#24722] +* Avoid race when shutting down controller processes {pull}24579[#24579] +* Fix handling of document failure exception in InternalEngine {pull}22718[#22718] +* Ensure remote cluster is connected before fetching `_field_caps` {pull}24845[#24845] (issue: {issue}24763[#24763]) + +Java API:: +* BulkProcessor flush runnable preserves the thread context from creation time {pull}26718[#26718] (issue: {issue}26596[#26596]) + +Java High Level REST Client:: +* Make RestHighLevelClient's Request class public {pull}26627[#26627] (issue: {issue}26455[#26455]) +* 
Forbid direct usage of ContentType.create() methods {pull}26457[#26457] (issues: {issue}22769[#22769], {issue}26438[#26438]) +* Make ShardSearchTarget optional when parsing ShardSearchFailure {pull}27078[#27078] (issue: {issue}27055[#27055]) + +Java REST Client:: +* Better message text for ResponseException {pull}26564[#26564] +* rest-client-sniffer: configurable threadfactory {pull}26897[#26897] + +Logging:: +* Allow not configuring logging without config {pull}26209[#26209] (issues: {issue}20575[#20575], {issue}24076[#24076]) + +Mapping:: +* Allow copying from a field to another field that belongs to the same nested object. {pull}26774[#26774] (issue: {issue}26763[#26763]) +* Fixed bug where mapper_parsing_exception is thrown for a numeric field with ignore_malformed=true when inserting "NaN" {pull}25967[#25967] (issue: {issue}25289[#25289]) +* Coerce decimal strings for whole number types by truncating the decimal part {pull}25835[#25835] (issue: {issue}25819[#25819]) +* Fix parsing of ip range queries. {pull}25768[#25768] (issue: {issue}25636[#25636]) +* Disable date field mapping changing {pull}25285[#25285] (issue: {issue}25271[#25271]) +* Correctly enable _all for older 5.x indices {pull}25087[#25087] (issue: {issue}25068[#25068]) +* token_count datatype should handle null value {pull}25046[#25046] (issue: {issue}24928[#24928]) +* keep _parent field while updating child type mapping {pull}24407[#24407] (issue: {issue}23381[#23381]) +* ICUCollationKeywordFieldMapper use SortedSetDocValuesField {pull}26267[#26267] +* Fix serialization of the `_all` field. {pull}26143[#26143] (issue: {issue}26136[#26136]) + +More Like This:: +* Pass over _routing value with more_like_this items to be retrieved {pull}24679[#24679] (issue: {issue}23699[#23699]) + +NOT CLASSIFIED:: +* DocumentMissingException during Logstash scripted upsert [ISSUE] {pull}27148[#27148] +* An assertion trips when master opens an index from before 5.x [ISSUE] {pull}24809[#24809] + +Nested Docs:: +* In case of a single type the _id field should be added to the nested document instead of _uid field {pull}25149[#25149] +* Inner hits source filtering not working [ISSUE] {pull}23090[#23090] + +Network:: +* Fixed ByteBuf leaking in org.elasticsearch.http.netty4.Netty4HttpRequestHandler {pull}27222[#27222] +* Check for closed connection while opening {pull}26932[#26932] +* Ensure pending transport handlers are invoked for all channel failures {pull}25150[#25150] +* Notify onConnectionClosed rather than onNodeDisconnect to prune transport handlers {pull}24639[#24639] (issues: {issue}24557[#24557], {issue}24575[#24575], {issue}24632[#24632]) +* Release pipelined http responses on close {pull}26226[#26226] +* Fix error message if an incompatible node connects {pull}24884[#24884] + +Packaging:: +* Fix handling of Windows paths containing parentheses {pull}26916[#26916] (issue: {issue}26454[#26454]) +* Exit Windows scripts promptly on failure {pull}25959[#25959] +* Pass config path as a system property {pull}25943[#25943] +* ES_HOME needs to be made absolute before attempt at traversal {pull}25865[#25865] +* Fix elasticsearch-keystore handling of path.conf {pull}25811[#25811] +* Stop disabling explicit GC {pull}25759[#25759] +* Avoid failing install if system-sysctl is masked {pull}25657[#25657] (issue: {issue}24234[#24234]) +* Get short path name for native controllers {pull}25344[#25344] +* When stopping via systemd only kill the JVM, not its control group {pull}25195[#25195] 
+* remove remaining references to scripts directory {pull}24771[#24771] +* Handle parentheses in batch file path {pull}24731[#24731] (issue: {issue}24712[#24712]) +* Detect modified keystore on package removal {pull}26300[#26300] +* Create keystore on RPM and Debian package install {pull}26282[#26282] +* Add safer empty variable checking for Windows {pull}26268[#26268] (issue: {issue}26261[#26261]) +* Export HOSTNAME environment variable {pull}26262[#26262] (issues: {issue}25807[#25807], {issue}26255[#26255]) +* Fix daemonization command status test {pull}26196[#26196] (issue: {issue}26080[#26080]) +* Set RuntimeDirectory in systemd service {pull}23526[#23526] + +Parent/Child:: +* The default _parent field should not try to load global ordinals {pull}25851[#25851] (issue: {issue}25849[#25849]) + +Percolator:: +* Also support query extraction for queries wrapped inside a ESToParentBlockJoinQuery {pull}26754[#26754] +* Fix range queries with date range based on current time in percolator queries. {pull}24666[#24666] (issue: {issue}23921[#23921]) + +Plugin Analysis Kuromoji:: +* Fix kuromoji default stoptags {pull}26600[#26600] (issue: {issue}26519[#26519]) + +Plugin Analysis Phonetic:: +* Fix beidermorse phonetic token filter for unspecified `languageset` {pull}27112[#27112] (issue: {issue}26771[#26771]) + +Plugin Discovery File:: +* Fix discovery-file plugin to use custom config path {pull}26662[#26662] (issue: {issue}26660[#26660]) + +Plugin Ingest Attachment:: +* Add missing mime4j library {pull}22764[#22764] (issue: {issue}22077[#22077]) + +Plugin Lang Painless:: +* Painless: allow doubles to be casted to longs. {pull}25936[#25936] + +Plugin Repository Azure:: +* Azure snapshots can not be restored anymore {pull}26778[#26778] (issues: {issue}22858[#22858], {issue}26751[#26751], {issue}26777[#26777]) +* Snapshot : azure module - accelerate the listing of files (used in delete snapshot) {pull}25710[#25710] (issue: {issue}25424[#25424]) +* Use Azure upload method instead of our own implementation {pull}26751[#26751] +* Make calls to CloudBlobContainer#exists privileged {pull}25937[#25937] (issue: {issue}25931[#25931]) + +Plugin Repository GCS:: +* Ensure that gcs client creation is privileged {pull}25938[#25938] (issue: {issue}25932[#25932]) + +Plugin Repository HDFS:: +* Add Log4j to SLF4J binding for repository-hdfs {pull}26514[#26514] (issue: {issue}26512[#26512]) +* Upgrading HDFS Repository Plugin to use HDFS 2.8.1 Client {pull}25497[#25497] (issue: {issue}25450[#25450]) + +Plugin Repository S3:: +* Avoid SecurityException in repository-S3 on DefaultS3OutputStream.flush() {pull}25254[#25254] (issue: {issue}25192[#25192]) +* Wrap getCredentials() in a doPrivileged() block {pull}23297[#23297] (issues: {issue}22534[#22534], {issue}23271[#23271]) + +Plugins:: +* X-Pack plugin download fails on Windows desktop [ISSUE] {pull}24570[#24570] +* Fix plugin installation permissions {pull}24527[#24527] (issue: {issue}24480[#24480]) + +Query DSL:: +* Fixed incomplete JSON body on count request making org.elasticsearch.rest.action.RestActions#parseTopLevelQueryBuilder go into endless loop {pull}26680[#26680] (issue: {issue}26083[#26083]) +* SpanNearQueryBuilder should return the inner clause when a single clause is provided {pull}25856[#25856] (issue: {issue}25630[#25630]) +* Refactor field expansion for match, multi_match and query_string query {pull}25726[#25726] (issues: {issue}25551[#25551], {issue}25556[#25556]) +* WrapperQueryBuilder should also rewrite the parsed query {pull}25480[#25480] + 
+REST:: +* Rest test fixes {pull}27354[#27354] +* Fix inconsistencies in the rest api specs for cat.snapshots {pull}26996[#26996] (issues: {issue}25737[#25737], {issue}26923[#26923]) +* Fix inconsistencies in the rest api specs for *_script {pull}26971[#26971] (issue: {issue}26923[#26923]) +* exists template needs a template name {pull}25988[#25988] +* Fix handling of invalid error trace parameter {pull}25785[#25785] (issue: {issue}25774[#25774]) +* Fix handling of exceptions thrown on HEAD requests {pull}25172[#25172] (issue: {issue}21125[#21125]) +* Fixed NPEs caused by requests without content. {pull}23497[#23497] (issue: {issue}24701[#24701]) +* Fix get mappings HEAD requests {pull}23192[#23192] (issue: {issue}21125[#21125]) + +Recovery:: +* Close translog view after primary-replica resync {pull}25862[#25862] (issue: {issue}24841[#24841]) + +Reindex API:: +* Fix update_by_query's default size parameter {pull}26784[#26784] (issue: {issue}26761[#26761]) +* Reindex: don't duplicate _source parameter {pull}24629[#24629] (issue: {issue}24628[#24628]) +* Add qa module that tests reindex-from-remote against pre-5.0 versions of Elasticsearch {pull}24561[#24561] (issues: {issue}23828[#23828], {issue}24520[#24520]) + +Scroll:: +* Fix single shard scroll within a cluster with nodes in version `>= 5.3` and `<= 5.3` {pull}24512[#24512] + +Search:: +* Fail query when a sort is provided in conjunction with rescorers {pull}26510[#26510] +* Let search phases override max concurrent requests {pull}26484[#26484] (issue: {issue}26198[#26198]) +* Avoid stack overflow on search phases {pull}27069[#27069] (issue: {issue}27042[#27042]) +* Fix search_after with geo distance sorting {pull}26891[#26891] +* Fix serialization errors when cross cluster search goes to a single shard {pull}26881[#26881] (issue: {issue}26833[#26833]) +* Early termination with index sorting should not set terminated_early in the response {pull}26597[#26597] (issue: {issue}26408[#26408]) +* Format doc values fields. {pull}22146[#22146] +* Fix term(s) query for range field {pull}25918[#25918] +* Caching a MinDocQuery can lead to wrong results. {pull}25909[#25909] +* Fix random score generation when no seed is provided. {pull}25908[#25908] +* Merge FunctionScoreQuery and FiltersFunctionScoreQuery {pull}25889[#25889] (issues: {issue}15709[#15709], {issue}23628[#23628]) +* Respect cluster alias in `_index` aggs and queries {pull}25885[#25885] (issue: {issue}25606[#25606]) +* First increment shard stats before notifying and potentially sending response {pull}25818[#25818] +* Remove assertion about deviation when casting to a float. {pull}25806[#25806] (issue: {issue}25330[#25330]) +* Prevent skipping shards if a suggest builder is present {pull}25739[#25739] (issue: {issue}25658[#25658]) +* Ensure remote cluster alias is preserved in inner hits aggs {pull}25627[#25627] (issue: {issue}25606[#25606]) +* Do not search locally if remote index pattern resolves to no indices {pull}25436[#25436] (issue: {issue}25426[#25426]) +* Adds check for negative search request size {pull}25397[#25397] (issue: {issue}22530[#22530]) +* Make sure range queries are correctly profiled. 
{pull}25108[#25108] +* Fix RangeFieldMapper rangeQuery to properly handle relations {pull}24808[#24808] (issue: {issue}24744[#24744]) +* Fix ExpandSearchPhase when response contains no hits {pull}24688[#24688] (issue: {issue}24672[#24672]) +* Refactor simple_query_string to handle text part like multi_match and query_string {pull}26145[#26145] (issue: {issue}25726[#25726]) +* Fix `_exists_` in query_string on empty indices. {pull}25993[#25993] (issue: {issue}25956[#25956]) +* Fix script field sort returning Double.MAX_VALUE for all documents {pull}24942[#24942] (issue: {issue}24940[#24940]) +* Compute the took time of the query after the expand phase of field collapsing {pull}24902[#24902] (issue: {issue}24900[#24900]) + +Sequence IDs:: +* Fire global checkpoint sync under system context {pull}26984[#26984] +* Fix pre-6.0 response to unknown replication actions {pull}25744[#25744] (issue: {issue}10708[#10708]) +* Track local checkpoint on primary immediately {pull}25434[#25434] (issues: {issue}10708[#10708], {issue}25355[#25355], {issue}25415[#25415]) +* Initialize max unsafe auto ID timestamp on shrink {pull}25356[#25356] (issues: {issue}10708[#10708], {issue}25355[#25355]) +* Use correct primary term for replicating NOOPs {pull}25128[#25128] +* Handle already closed while filling gaps {pull}25021[#25021] (issue: {issue}24925[#24925]) +* TranslogWriter.assertNoSeqNumberConflict failure [ISSUE] {pull}26710[#26710] +* Avoid losing ops in file-based recovery {pull}22945[#22945] (issue: {issue}22484[#22484]) +* Handle primary failure handling replica response {pull}24926[#24926] (issue: {issue}24935[#24935]) + +Settings:: +* Emit settings deprecation logging on empty update {pull}27017[#27017] (issue: {issue}26419[#26419]) +* Fix filtering for ListSetting {pull}26914[#26914] +* Fix settings serialization to not serialize secure settings or not take the total size into account {pull}25323[#25323] +* Keystore CLI should use the AddFileKeyStoreCommand for files {pull}25298[#25298] +* Allow resetting settings that use an IP validator {pull}24713[#24713] (issue: {issue}24709[#24709]) +* Updating an unrecognized setting should error out with that reason [ISSUE] {pull}25607[#25607] +* Settings: Fix setting groups to include secure settings {pull}25076[#25076] (issue: {issue}25069[#25069]) + +Similarities:: +* Add boolean similarity to built in similarity types {pull}26613[#26613] + +Snapshot/Restore:: +* Snapshot/Restore: better handle incorrect chunk_size settings in FS repo {pull}26844[#26844] (issue: {issue}26843[#26843]) +* Snapshot/Restore: Ensure that shard failure reasons are correctly stored in CS {pull}25941[#25941] (issue: {issue}25878[#25878]) +* Output all empty snapshot info fields if in verbose mode {pull}25455[#25455] (issue: {issue}24477[#24477]) +* Remove redundant and broken MD5 checksum from repository-s3 {pull}25270[#25270] (issue: {issue}25269[#25269]) +* Consolidates the logic for cleaning up snapshots on master election {pull}24894[#24894] (issue: {issue}24605[#24605]) +* Removes completed snapshot from cluster state on master change {pull}24605[#24605] (issue: {issue}24452[#24452]) +* Keep snapshot restore state and routing table in sync {pull}20836[#20836] (issue: {issue}19774[#19774]) +* Master failover during snapshotting could leave the snapshot incomplete [OPEN] [ISSUE] {pull}25281[#25281] +* Fix inefficient (worst case exponential) loading of snapshot repository {pull}24510[#24510] (issue: {issue}24509[#24509]) + +Stats:: +* Fix RestGetAction name typo 
{pull}27266[#27266] +* Keep cumulative elapsed scroll time in microseconds {pull}27068[#27068] (issue: {issue}27046[#27046]) +* _nodes/stats should not fail due to concurrent AlreadyClosedException {pull}25016[#25016] (issue: {issue}23099[#23099]) +* Avoid double decrement on current query counter {pull}24922[#24922] (issues: {issue}22996[#22996], {issue}24872[#24872]) +* Adjust available and free bytes to be non-negative on huge FSes {pull}24911[#24911] (issues: {issue}23093[#23093], {issue}24453[#24453]) + +Suggesters:: +* Fix division by zero in phrase suggester that causes assertion to fail {pull}27149[#27149] +* Context suggester should filter doc values field {pull}25858[#25858] (issue: {issue}25404[#25404]) +* Fix context suggester to read values from keyword type field {pull}24200[#24200] (issue: {issue}24129[#24129]) + +Templates:: +* Tests: Fix FullClusterRestartIT.testSnapshotRestore test failing in 6.x {pull}27218[#27218] (issue: {issue}27213[#27213]) + +Translog:: +* Fix Translog.Delete serialization for sequence numbers {pull}22543[#22543] + +Upgrade API:: +* Upgrade API: fix excessive logging and unnecessary template updates {pull}26698[#26698] (issue: {issue}26673[#26673]) + +[float] +=== Regressions + +Bulk:: +* Only re-parse operation if a mapping update was needed {pull}23832[#23832] (issue: {issue}23665[#23665]) + +Highlighting:: +* Fix Fast Vector Highlighter NPE on match phrase prefix {pull}25116[#25116] (issue: {issue}25088[#25088]) + +Search:: +* Always use DisjunctionMaxQuery to build cross fields disjunction {pull}25115[#25115] (issue: {issue}23966[#23966]) + +Sequence IDs:: +* Indexing performance degradation in 6.0.0-beta1 [ISSUE] {pull}26339[#26339] + +//[float] +//=== Known Issues + +[float] +=== Upgrades + +Core:: +* Upgrade to Lucene 7.0.0 {pull}26744[#26744] +* Upgrade to lucene-7.0.0-snapshot-d94a5f0. {pull}26441[#26441] +* Upgrade to lucene-7.0.0-snapshot-a128fcb. 
{pull}26090[#26090] +* Upgrade to a Lucene 7 snapshot {pull}24089[#24089] (issues: {issue}23966[#23966], {issue}24086[#24086], {issue}24087[#24087], {issue}24088[#24088]) + +Logging:: +* Upgrade to Log4j 2.9.1 {pull}26750[#26750] (issues: {issue}109[#109], {issue}26464[#26464], {issue}26467[#26467]) +* Upgrade to Log4j 2.9.0 {pull}26450[#26450] (issue: {issue}23798[#23798]) + +Network:: +* Upgrade to Netty 4.1.13.Final {pull}25581[#25581] (issues: {issue}24729[#24729], {issue}6866[#6866]) +* Upgrade to Netty 4.1.11.Final {pull}24652[#24652] + +Plugin Ingest Attachment:: +* Update to Tika 1.14 {pull}21591[#21591] (issue: {issue}20390[#20390]) + +Upgrade API:: +* Improve stability and logging of TemplateUpgradeServiceIT tests {pull}25386[#25386] (issue: {issue}25382[#25382]) + +[[release-notes-6.0.0-rc2]] +== {es} version 6.0.0-rc2 + +[float] +[[breaking-6.0.0-rc2]] +=== Breaking Changes + +Inner Hits:: +* Return the _source of inner hit nested as is without wrapping it into its full path context {pull}26982[#26982] (issues: {issue}26102[#26102], {issue}26944[#26944]) + +//[float] +//=== Breaking Java Changes + +//[float] +//=== Deprecations + +//[float] +//=== New Features + +[float] +=== Enhancements + +Core:: +* Ignore .DS_Store files on macOS {pull}27108[#27108] (issue: {issue}23982[#23982]) + +Index Templates:: +* Fix error message for a put index template request without index_patterns {pull}27102[#27102] (issue: {issue}27100[#27100]) + +Mapping:: +* Don't detect source's XContentType in DocumentParser.parseDocument() {pull}26880[#26880] + +Network:: +* Add additional low-level logging handler {pull}26887[#26887] +* Unwrap causes when maybe dying {pull}26884[#26884] + +Plugins:: +* Adjust SHA-512 supported format on plugin install {pull}27093[#27093] + +REST:: +* Cat shards bytes {pull}26952[#26952] + +[float] +=== Bug Fixes + +Aggregations:: +* Create weights lazily in filter and filters aggregation {pull}26983[#26983] +* Fix IndexOutOfBoundsException in histograms for NaN doubles (#26787) {pull}26856[#26856] (issue: {issue}26787[#26787]) +* Scripted_metric _agg parameter disappears if params are provided {pull}19863[#19863] (issue: {issue}19768[#19768]) + +CAT API:: +* Fix NPE for /_cat/indices when no primary shard {pull}26953[#26953] (issue: {issue}26942[#26942]) + +Cache:: +* Reduce the default number of cached queries. 
{pull}26949[#26949] (issue: {issue}26938[#26938]) + +Core:: +* Timed runnable should delegate to abstract runnable {pull}27095[#27095] (issue: {issue}27069[#27069]) +* Stop invoking non-existent syscall {pull}27016[#27016] (issue: {issue}20179[#20179]) +* MetaData Builder doesn't properly prevent an alias with the same name as an index {pull}26804[#26804] + +Ingest:: +* date processor should not fail if timestamp is specified as json number {pull}26986[#26986] (issue: {issue}26967[#26967]) +* date_index_name processor should not fail if timestamp is specified as json number {pull}26910[#26910] (issue: {issue}26890[#26890]) + +Internal:: +* Upgrade Lucene to version 7.0.1 {pull}26926[#26926] + +Java High Level REST Client:: +* Make ShardSearchTarget optional when parsing ShardSearchFailure {pull}27078[#27078] (issue: {issue}27055[#27055]) + +Java REST Client:: +* rest-client-sniffer: configurable threadfactory {pull}26897[#26897] + +Mapping:: +* wrong link target for datatype murmur3 {pull}27143[#27143] + +Network:: +* Check for closed connection while opening {pull}26932[#26932] + +Packaging:: +* Fix handling of Windows paths containing parentheses {pull}26916[#26916] (issue: {issue}26454[#26454]) + +Percolator:: +* Also support query extraction for queries wrapped inside a ESToParentBlockJoinQuery {pull}26754[#26754] + +Plugin Analysis Phonetic:: +* Fix beidermorse phonetic token filter for unspecified `languageset` {pull}27112[#27112] (issue: {issue}26771[#26771]) + +Plugin Repository Azure:: +* Use Azure upload method instead of our own implementation {pull}26751[#26751] + +REST:: +* Fix inconsistencies in the rest api specs for cat.snapshots {pull}26996[#26996] (issues: {issue}25737[#25737], {issue}26923[#26923]) +* Fix inconsistencies in the rest api specs for *_script {pull}26971[#26971] (issue: {issue}26923[#26923]) +* exists template needs a template name {pull}25988[#25988] + +Reindex API:: +* Fix update_by_query's default size parameter {pull}26784[#26784] (issue: {issue}26761[#26761]) + +Search:: +* Avoid stack overflow on search phases {pull}27069[#27069] (issue: {issue}27042[#27042]) +* Fix search_after with geo distance sorting {pull}26891[#26891] +* Fix serialization errors when cross cluster search goes to a single shard {pull}26881[#26881] (issue: {issue}26833[#26833]) +* Early termination with index sorting should not set terminated_early in the response {pull}26597[#26597] (issue: {issue}26408[#26408]) +* Format doc values fields. 
{pull}22146[#22146] + +Sequence IDs:: +* Fire global checkpoint sync under system context {pull}26984[#26984] + +Settings:: +* Emit settings deprecation logging on empty update {pull}27017[#27017] (issue: {issue}26419[#26419]) +* Fix filtering for ListSetting {pull}26914[#26914] + +Stats:: +* Keep cumulative elapsed scroll time in microseconds {pull}27068[#27068] (issue: {issue}27046[#27046]) + +Suggesters:: +* Fix division by zero in phrase suggester that causes assertion to fail {pull}27149[#27149] + +//[float] +//=== Regressions + +//[float] +//=== Known Issues + +[[release-notes-6.0.0-rc1]] +== {es} version 6.0.0-rc1 + +[float] +[[breaking-6.0.0-rc1]] +=== Breaking Changes + +Packaging:: +* Configure heap dump path out of the box {pull}26755[#26755] (issue: {issue}26665[#26665]) + +Query DSL:: +* Remove deprecated `type` and `slop` field in `match` query {pull}26720[#26720] +* Remove several parse field deprecations in query builders {pull}26711[#26711] +* Remove deprecated parameters from `ids_query` {pull}26508[#26508] + +//[float] +//=== Breaking Java Changes + +[float] +=== Deprecations + +Plugins:: +* Plugins: Add backcompat for sha1 checksums {pull}26748[#26748] (issue: {issue}26746[#26746]) + +//[float] +//=== New Features + +[float] +=== Enhancements + +Core:: +* Allow `InputStreamStreamInput` array size validation where applicable {pull}26692[#26692] +* Refactor bootstrap check results and error messages {pull}26637[#26637] +* Add BootstrapContext to expose settings and recovered state to bootstrap checks {pull}26628[#26628] +* Unit testable index creation task on MetaDataCreateIndexService {pull}25961[#25961] + +Discovery:: +* Allow plugins to validate cluster-state on join {pull}26595[#26595] + +Mapping:: +* More efficient encoding of range fields. 
{pull}26470[#26470] (issue: {issue}26443[#26443]) + +Plugin Repository HDFS:: +* Add permission checks before reading from HDFS stream {pull}26716[#26716] (issue: {issue}26714[#26714]) + +Recovery:: +* Introduce a History UUID as a requirement for ops based recovery {pull}26577[#26577] (issue: {issue}10708[#10708]) + +Scripting:: +* ScriptService: Replace max compilation per minute setting with max compilation rate {pull}26399[#26399] + +Search:: +* Add soft limit on allowed number of script fields in request {pull}26598[#26598] (issue: {issue}26390[#26390]) +* Add a soft limit for the number of requested doc-value fields {pull}26574[#26574] (issue: {issue}26390[#26390]) + +Sequence IDs:: +* Restoring from snapshot should force generation of a new history uuid {pull}26694[#26694] (issues: {issue}10708[#10708], {issue}26544[#26544], {issue}26557[#26557], {issue}26577[#26577]) +* Add global checkpoint tracking on the primary {pull}26666[#26666] (issue: {issue}26591[#26591]) +* Introduce global checkpoint background sync {pull}26591[#26591] (issues: {issue}26573[#26573], {issue}26630[#26630], {issue}26666[#26666]) +* Move `UNASSIGNED_SEQ_NO` and `NO_OPS_PERFORMED` to `SequenceNumbers` {pull}26494[#26494] (issue: {issue}10708[#10708]) + + +[float] +=== Bug Fixes + +Aggregations:: +* Do not delegate a null scorer to LeafBucketCollectors {pull}26747[#26747] (issue: {issue}26611[#26611]) + +Core:: +* Fix cache compute if absent for expired entries {pull}26516[#26516] + +Dates:: +* Fix typo in date format {pull}26503[#26503] (issue: {issue}26500[#26500]) + +Highlighting:: +* Fix percolator highlight sub fetch phase to not highlight query twice {pull}26622[#26622] + +Inner Hits:: +* Do not allow inner hits that fetch _source and have a non nested object field as parent {pull}25749[#25749] (issue: {issue}25315[#25315]) + +Internal:: +* `IndexShard.routingEntry` should only be updated once all internal state is ready {pull}26776[#26776] +* Catch exceptions and inform handler in RemoteClusterConnection#collectNodes {pull}26725[#26725] (issue: {issue}26700[#26700]) +* Internal: Add versionless alias for rest client codebase in policy files {pull}26521[#26521] + +Java API:: +* BulkProcessor flush runnable preserves the thread context from creation time {pull}26718[#26718] (issue: {issue}26596[#26596]) + +Java High Level REST Client:: +* Make RestHighLevelClient's Request class public {pull}26627[#26627] (issue: {issue}26455[#26455]) +* Forbid direct usage of ContentType.create() methods {pull}26457[#26457] (issues: {issue}22769[#22769], {issue}26438[#26438]) + +Java REST Client:: +* Better message text for ResponseException {pull}26564[#26564] + +Mapping:: +* Allow copying from a field to another field that belongs to the same nested object. 
{pull}26774[#26774] (issue: {issue}26763[#26763]) + +Plugin Analysis Kuromoji:: +* Fix kuromoji default stoptags {pull}26600[#26600] (issue: {issue}26519[#26519]) + +Plugin Discovery File:: +* Fix discovery-file plugin to use custom config path {pull}26662[#26662] (issue: {issue}26660[#26660]) + +Plugin Repository Azure:: +* Azure snapshots can not be restored anymore {pull}26778[#26778] (issues: {issue}22858[#22858], {issue}26751[#26751], {issue}26777[#26777]) +* Snapshot : azure module - accelerate the listing of files (used in delete snapshot) {pull}25710[#25710] (issue: {issue}25424[#25424]) + +Plugin Repository HDFS:: +* Add Log4j to SLF4J binding for repository-hdfs {pull}26514[#26514] (issue: {issue}26512[#26512]) + +Query DSL:: +* Fixed incomplete JSON body on count request making org.elasticsearch.rest.action.RestActions#parseTopLevelQueryBuilder go into endless loop {pull}26680[#26680] (issue: {issue}26083[#26083]) + +Search:: +* Fail query when a sort is provided in conjunction with rescorers {pull}26510[#26510] +* Let search phases override max concurrent requests {pull}26484[#26484] (issue: {issue}26198[#26198]) + +Similarities:: +* Add boolean similarity to built in similarity types {pull}26613[#26613] + +Upgrade API:: +* Upgrade API: fix excessive logging and unnecessary template updates {pull}26698[#26698] (issue: {issue}26673[#26673]) + +//[float] +//=== Regressions + +//[float] +//=== Known Issues + +[float] +=== Upgrades + +Core:: +* Upgrade to Lucene 7.0.0 {pull}26744[#26744] +* Upgrade to lucene-7.0.0-snapshot-d94a5f0. {pull}26441[#26441] + +Logging:: +* Upgrade to Log4j 2.9.1 {pull}26750[#26750] (issues: {issue}109[#109], {issue}26464[#26464], {issue}26467[#26467]) +* Upgrade to Log4j 2.9.0 {pull}26450[#26450] (issue: {issue}23798[#23798]) + +[[release-notes-6.0.0-beta2]] +== {es} version 6.0.0-beta2 + +[float] +[[breaking-6.0.0-beta2]] +=== Breaking Changes + +Analysis:: +* Do not allow custom analyzers to have the same names as built-in analyzers {pull}22349[#22349] (issue: {issue}22263[#22263]) + +Cluster:: +* Disallow : in cluster and index/alias names {pull}26247[#26247] (issue: {issue}23892[#23892]) + +Inner Hits:: +* Unfiltered nested source should keep its full path {pull}26102[#26102] (issues: {issue}18567[#18567], {issue}23090[#23090]) + +Mapping:: +* Reject out of range numbers for float, double and half_float {pull}25826[#25826] (issue: {issue}25534[#25534]) + +Network:: +* Remove unused Netty-related settings {pull}26161[#26161] + +Packaging:: +* Rename CONF_DIR to ES_PATH_CONF {pull}26197[#26197] (issue: {issue}26154[#26154]) + +Query DSL:: +* Throw exception in scroll requests using `from` {pull}26235[#26235] (issue: {issue}9373[#9373]) + +[float] +=== Breaking Java Changes + +Aggregations:: +* Fix NPE when `values` is omitted on percentile_ranks agg {pull}26046[#26046] + +//[float] +//=== Deprecations + +//[float] +//=== New Features + +[float] +=== Enhancements + +Aggregations:: +* Support distance units in GeoHashGrid aggregation precision {pull}26291[#26291] (issue: {issue}5042[#5042]) +* Reject multiple methods in `percentiles` aggregation {pull}26163[#26163] (issue: {issue}26095[#26095]) +* Use `global_ordinals_hash` execution mode when sorting by sub aggregations. 
{pull}26014[#26014] (issue: {issue}24359[#24359]) +* Add a specialized deferring collector for terms aggregator {pull}25190[#25190] + +Core:: +* Use Java 9 FilePermission model {pull}26302[#26302] (issue: {issue}21534[#21534]) +* Add friendlier message on bad keystore permissions {pull}26284[#26284] +* Epoch millis and second formats accept float implicitly {pull}26119[#26119] (issue: {issue}14641[#14641]) + +Internal:: +* Prevent cluster internal `ClusterState.Custom` impls to leak to a client {pull}26232[#26232] +* Use holder pattern for lazy deprecation loggers {pull}26218[#26218] (issue: {issue}26210[#26210]) +* Allow `ClusterState.Custom` to be created on initial cluster states {pull}26144[#26144] + +Java High Level REST Client:: +* Make RestHighLevelClient Closeable and simplify its creation {pull}26180[#26180] (issue: {issue}26086[#26086]) + +Mapping:: +* Loosen the restrictions on disabling _all in 6.x {pull}26259[#26259] + +Percolator:: +* Store the QueryBuilder's Writable representation instead of its XContent representation {pull}25456[#25456] +* Add support for selecting percolator query candidate matches containing wildcard / prefix queries {pull}25351[#25351] + +Settings:: +* Persist created keystore on startup unless keystore is present {pull}26253[#26253] (issue: {issue}26126[#26126]) +* Settings: Add keystore.seed auto generated secure setting {pull}26149[#26149] +* Settings: Add keystore creation to add commands {pull}26126[#26126] + +[float] +=== Bug Fixes + +Aggregations:: +* Check bucket metric ages point to a multi bucket agg {pull}26215[#26215] (issue: {issue}25775[#25775]) + +Allocation:: +* Fix DiskThresholdMonitor flood warning {pull}26204[#26204] (issue: {issue}26201[#26201]) +* Allow wildcards for shard IP filtering {pull}26187[#26187] (issues: {issue}22591[#22591], {issue}26184[#26184]) + +CRUD:: +* Serialize and expose timeout of acknowledged requests in REST layer {pull}26189[#26189] (issue: {issue}26213[#26213]) +* Fix silent loss of last command to _bulk and _msearch due to missing newline {pull}25740[#25740] (issue: {issue}7601[#7601]) + +Cluster:: +* Register setting `cluster.indices.tombstones.size` {pull}26193[#26193] (issue: {issue}26191[#26191]) + +Highlighting:: +* Fix nested query highlighting {pull}26305[#26305] (issue: {issue}26230[#26230]) + +Logging:: +* Allow not configuring logging without config {pull}26209[#26209] (issues: {issue}20575[#20575], {issue}24076[#24076]) + +Mapping:: +* ICUCollationKeywordFieldMapper use SortedSetDocValuesField {pull}26267[#26267] +* Fix serialization of the `_all` field. {pull}26143[#26143] (issue: {issue}26136[#26136]) + +Network:: +* Release pipelined http responses on close {pull}26226[#26226] + +Packaging:: +* Detect modified keystore on package removal {pull}26300[#26300] +* Create keystore on RPM and Debian package install {pull}26282[#26282] +* Add safer empty variable checking for Windows {pull}26268[#26268] (issue: {issue}26261[#26261]) +* Export HOSTNAME environment variable {pull}26262[#26262] (issues: {issue}25807[#25807], {issue}26255[#26255]) +* Fix daemonization command status test {pull}26196[#26196] (issue: {issue}26080[#26080]) +* Set RuntimeDirectory in systemd service {pull}23526[#23526] + +Search:: +* Refactor simple_query_string to handle text part like multi_match and query_string {pull}26145[#26145] (issue: {issue}25726[#25726]) +* Fix `_exists_` in query_string on empty indices. 
{pull}25993[#25993] (issue: {issue}25956[#25956]) + +//[float] +//=== Regressions + +//[float] +//=== Known Issues + +[float] +=== Upgrades + +Core:: +* Upgrade to lucene-7.0.0-snapshot-a128fcb. {pull}26090[#26090] + +[[release-notes-6.0.0-beta1]] +== {es} version 6.0.0-beta1 + +[float] +[[breaking-6.0.0-beta1]] +=== Breaking Changes + +Aggregations:: +* Change parsing of numeric `to` and `from` parameters in `date_range` aggregation {pull}25376[#25376] (issue: {issue}17920[#17920]) + +Aliases:: +* Wrong behavior deleting alias {pull}23997[#23997] (issues: {issue}10106[#10106], {issue}23960[#23960]) + +Highlighting:: +* Remove the postings highlighter and make unified the default highlighter choice {pull}25028[#25028] + +Index APIs:: +* Remove (deprecated) support for '+' in index expressions {pull}25274[#25274] (issue: {issue}24515[#24515]) +* Delete index API to work only against concrete indices {pull}25268[#25268] (issues: {issue}2318[#2318], {issue}23997[#23997]) + +Indexed Scripts/Templates:: +* Scripting: Remove search template actions {pull}25717[#25717] + +Ingest:: +* update ingest-user-agent regexes.yml {pull}25608[#25608] +* remove ingest.new_date_format {pull}25583[#25583] + +Java REST Client:: +* Remove deprecated created and found from index, delete and bulk {pull}25516[#25516] (issues: {issue}19566[#19566], {issue}19630[#19630], {issue}19633[#19633]) + +Packaging:: +* Remove support for ES_INCLUDE {pull}25804[#25804] +* Setup: Change default heap to 1G {pull}25695[#25695] +* Use config directory to find jvm.options {pull}25679[#25679] (issue: {issue}23004[#23004]) +* Remove implicit 32-bit support {pull}25435[#25435] +* Remove default path settings {pull}25408[#25408] (issue: {issue}25357[#25357]) +* Remove path.conf setting {pull}25392[#25392] (issue: {issue}25357[#25357]) +* Honor masking of systemd-sysctl.service {pull}24234[#24234] (issues: {issue}21899[#21899], {issue}806[#806]) + +Plugin Analysis ICU:: +* Upgrade icu4j for the ICU analysis plugin to 59.1 {pull}25243[#25243] (issue: {issue}21425[#21425]) + +Plugin Discovery Azure Classic:: +* Remove `discovery.type` BWC layer from the EC2/Azure/GCE plugins {pull}25080[#25080] (issue: {issue}24543[#24543]) + +Plugin Repository GCS:: +* GCS Repository: Remove specifying credential file on disk {pull}24727[#24727] + +Plugins:: +* Make plugin loading stricter {pull}25405[#25405] + +Query DSL:: +* Refactor QueryStringQuery for 6.0 {pull}25646[#25646] (issue: {issue}25574[#25574]) +* Change `split_on_whitespace` default to false {pull}25570[#25570] (issue: {issue}25470[#25470]) +* Remove deprecated template query {pull}24577[#24577] (issue: {issue}19390[#19390]) + +REST:: +* IndexClosedException to return 400 rather than 403 {pull}25752[#25752] +* Remove comma-separated feature parsing for GetIndicesAction {pull}24723[#24723] (issue: {issue}24437[#24437]) +* Improve REST error handling when endpoint does not support HTTP verb, add OPTIONS support {pull}24437[#24437] (issues: {issue}0[#0], {issue}15335[#15335], {issue}17916[#17916]) + +Scripting:: +* remove lang url parameter from stored script requests {pull}25779[#25779] (issue: {issue}22887[#22887]) +* Disallow lang to be used with Stored Scripts {pull}25610[#25610] +* Remove Deprecated Script Settings {pull}24756[#24756] (issue: {issue}24532[#24532]) +* Scripting: Remove native scripts {pull}24726[#24726] (issue: {issue}19966[#19966]) +* Scripting: Remove file scripts {pull}24627[#24627] (issue: {issue}21798[#21798]) + +Search:: +* Make `index` in TermsLookup mandatory 
{pull}25753[#25753] (issue: {issue}25750[#25750]) +* Removes FieldStats API {pull}25628[#25628] (issue: {issue}25577[#25577]) +* Remove deprecated fielddata_fields from search request {pull}25566[#25566] (issue: {issue}25537[#25537]) +* Removes deprecated fielddata_fields {pull}25537[#25537] (issue: {issue}19027[#19027]) + +Settings:: +* Settings: Remove shared setting property {pull}24728[#24728] +* Settings: Remove support for yaml and json config files {pull}24664[#24664] (issue: {issue}19391[#19391]) + +Similarities:: +* Similarity should accept dynamic settings when possible {pull}20339[#20339] (issue: {issue}6727[#6727]) + +[float] +=== Breaking Java Changes + +Aggregations:: +* Remove the unused SignificantTerms.compareTerm() method {pull}24714[#24714] +* Make SignificantTerms.Bucket an interface rather than an abstract class {pull}24670[#24670] (issue: {issue}24492[#24492]) + +Internal:: +* Collapses package structure for some bucket aggs {pull}25579[#25579] (issue: {issue}22868[#22868]) + +Java API:: +* Remove deprecated IdsQueryBuilder ctor {pull}25529[#25529] +* Removing unneeded getTookInMillis method {pull}23923[#23923] + +Java High Level REST Client:: +* Unify the result interfaces from get and search in Java client {pull}25361[#25361] (issue: {issue}16440[#16440]) +* Allow RestHighLevelClient to use plugins {pull}25024[#25024] + +Java REST Client:: +* Rename client artifacts {pull}25693[#25693] (issue: {issue}20248[#20248]) + +Plugin Delete By Query:: +* Move DeleteByQuery and Reindex requests into core {pull}24578[#24578] + +Query DSL:: +* Remove QueryParseContext {pull}25486[#25486] +* Remove QueryParseContext from parsing QueryBuilders {pull}25448[#25448] + +REST:: +* Return index name and empty map for /`{index}`/_alias with no aliases {pull}25114[#25114] (issues: {issue}24723[#24723], {issue}25090[#25090]) + +[float] +=== Deprecations + +Index APIs:: +* Deprecated use of + in index expressions {pull}24585[#24585] (issue: {issue}24515[#24515]) + +Indexed Scripts/Templates:: +* Scripting: Deprecate stored search template apis {pull}25437[#25437] (issue: {issue}24596[#24596]) + +Percolator:: +* Deprecate percolate query's document_type parameter. 
{pull}25199[#25199] + +Scripting:: +* Scripting: Change keys for inline/stored scripts to source/id {pull}25127[#25127] +* Scripting: Deprecate native scripts {pull}24692[#24692] (issue: {issue}19966[#19966]) +* Scripting: Deprecate index lookup {pull}24691[#24691] (issue: {issue}19359[#19359]) +* Deprecate Fine Grain Settings for Scripts {pull}24573[#24573] (issue: {issue}24532[#24532]) +* Scripting: Deprecate file script settings {pull}24555[#24555] (issue: {issue}21798[#21798]) +* Scripting: Deprecate file scripts {pull}24552[#24552] (issue: {issue}21798[#21798]) + +Settings:: +* Settings: Update settings deprecation from yml to yaml {pull}24663[#24663] (issue: {issue}19391[#19391]) + +Tribe Node:: +* Deprecate tribe service {pull}24598[#24598] (issue: {issue}24581[#24581]) + +[float] +=== New Features + +Analysis:: +* Expose simplepattern and simplepatternsplit tokenizers {pull}25159[#25159] (issue: {issue}23363[#23363]) +* Parse synonyms with the same analysis chain {pull}8049[#8049] (issue: {issue}7199[#7199]) + +Parent/Child:: +* Move parent_id query to the parent-join module {pull}25072[#25072] (issue: {issue}20257[#20257]) +* Introduce ParentJoinFieldMapper, a field mapper that creates parent/child relation within documents of the same index {pull}24978[#24978] (issue: {issue}20257[#20257]) + +Search:: +* Automatically early terminate search query based on index sorting {pull}24864[#24864] (issue: {issue}6720[#6720]) + +Sequence IDs:: +* Add a scheduled translog retention check {pull}25622[#25622] (issues: {issue}10708[#10708], {issue}25294[#25294]) +* Initialize sequence numbers on a shrunken index {pull}25321[#25321] (issue: {issue}10708[#10708]) +* Initialize primary term for shrunk indices {pull}25307[#25307] (issue: {issue}10708[#10708]) +* Introduce translog size and age based retention policies {pull}25147[#25147] (issue: {issue}10708[#10708]) + +Stats:: +* Adds nodes usage API to monitor usages of actions {pull}24169[#24169] + +Task Manager:: +* Task Management {pull}15117[#15117] + +Upgrade API:: +* TemplateUpgraders should be called during rolling restart {pull}25263[#25263] (issues: {issue}24379[#24379], {issue}24680[#24680]) + +[float] +=== Enhancements + +Aggregations:: +* Add strict parsing of aggregation ranges {pull}25769[#25769] +* Adds rewrite phase to aggregations {pull}25495[#25495] (issue: {issue}17676[#17676]) +* Tweak AggregatorBase.addRequestCircuitBreakerBytes {pull}25162[#25162] (issue: {issue}24511[#24511]) +* Add superset size to Significant Term REST response {pull}24865[#24865] +* Add document count to Matrix Stats aggregation response {pull}24776[#24776] +* Adds an implementation of LogLogBeta for the cardinality aggregation {pull}22323[#22323] (issue: {issue}22230[#22230]) + +Allocation:: +* Adjust status on bad allocation explain requests {pull}25503[#25503] (issue: {issue}25458[#25458]) +* Promote replica on the highest version node {pull}25277[#25277] (issue: {issue}10708[#10708]) + +Analysis:: +* [Analysis] Support normalizer in request param {pull}24767[#24767] (issue: {issue}23347[#23347]) +* Enforce validation for PathHierarchy tokenizer {pull}23510[#23510] +* [analysis-icu] Allow setting unicodeSetFilter {pull}20814[#20814] (issue: {issue}20820[#20820]) + +CAT API:: +* expand `/_cat/nodes` to return information about hard drive {pull}21775[#21775] (issue: {issue}21679[#21679]) + +Cluster:: +* Validate a joining node's version with version of existing cluster nodes {pull}25808[#25808] +* Switch indices read-only if a node runs out of disk 
space {pull}25541[#25541] (issue: {issue}24299[#24299]) +* Add a cluster block that allows to delete indices that are read-only {pull}24678[#24678] + +Core:: +* Add max file size bootstrap check {pull}25974[#25974] +* Add compatibility versions to main action response {pull}25799[#25799] +* Index ids in binary form. {pull}25352[#25352] (issues: {issue}18154[#18154], {issue}24615[#24615]) +* Explicitly reject duplicate data paths {pull}25178[#25178] +* Use SPI in High Level Rest Client to load XContent parsers {pull}25097[#25097] +* Upgrade to lucene-7.0.0-snapshot-a0aef2f {pull}24775[#24775] +* Speed up PK lookups at index time. {pull}19856[#19856] + +Engine:: +* Add refresh stats tracking for realtime get {pull}25052[#25052] (issue: {issue}24806[#24806]) +* Introducing a translog deletion policy {pull}24950[#24950] + +Exceptions:: +* IllegalStateException: Only duplicated jar instead of classpath {pull}24953[#24953] + +Highlighting:: +* Picks offset source for the unified highlighter directly from the es mapping {pull}25747[#25747] (issue: {issue}25699[#25699]) + +Index APIs:: +* Let primary own its replication group {pull}25692[#25692] (issue: {issue}25485[#25485]) +* Create index request should return the index name {pull}25139[#25139] (issue: {issue}23044[#23044]) + +Ingest:: +* Add Ingest-Processor specific Rest Endpoints & Add Grok endpoint {pull}25059[#25059] (issue: {issue}24725[#24725]) +* Port support for commercial GeoIP2 databases from Logstash. {pull}24889[#24889] +* add `exclude_keys` option to KeyValueProcessor {pull}24876[#24876] (issue: {issue}23856[#23856]) +* Allow removing multiple fields in ingest processor {pull}24750[#24750] (issue: {issue}24622[#24622]) +* Add target_field parameter to ingest processors {pull}24133[#24133] (issues: {issue}23228[#23228], {issue}23682[#23682]) + +Inner Hits:: +* Reuse inner hit query weight {pull}24571[#24571] (issue: {issue}23917[#23917]) + +Internal:: +* Cleanup IndexFieldData visibility {pull}25900[#25900] +* Bump the min compat version to 5.6.0 {pull}25805[#25805] +* "shard started" should show index and shard ID {pull}25157[#25157] +* Break out clear scroll logic from TransportClearScrollAction {pull}25125[#25125] (issue: {issue}25094[#25094]) +* Add helper methods to TransportActionProxy to identify proxy actions and requests {pull}25124[#25124] +* Add remote cluster infrastructure to fetch discovery nodes. {pull}25123[#25123] (issue: {issue}25094[#25094]) +* Add the ability to set eager_global_ordinals in the new parent-join field {pull}25019[#25019] +* Disallow multiple parent-join fields per mapping {pull}25002[#25002] +* Remove the need for _UNRELEASED suffix in versions {pull}24798[#24798] (issue: {issue}24768[#24768]) +* Optimize the order of bytes in uuids for better compression. 
{pull}24615[#24615] (issue: {issue}18209[#18209]) + +Java API:: +* Always Accumulate Transport Exceptions {pull}25017[#25017] (issue: {issue}23099[#23099]) + +Java High Level REST Client:: +* [DOCS] restructure java clients docs pages {pull}25517[#25517] +* Use SPI in High Level Rest Client to load XContent parsers {pull}25098[#25098] (issues: {issue}25024[#25024], {issue}25097[#25097]) +* Add support for clear scroll to high level REST client {pull}25038[#25038] +* Add search scroll method to high level REST client {pull}24938[#24938] (issue: {issue}23331[#23331]) +* Add search method to high level REST client {pull}24796[#24796] (issues: {issue}24794[#24794], {issue}24795[#24795]) + +Java REST Client:: +* Shade external dependencies in the rest client jar {pull}25780[#25780] (issue: {issue}25208[#25208]) +* RestClient uses system properties and system default SSLContext {pull}25757[#25757] (issue: {issue}23231[#23231]) + +Logging:: +* Prevent excessive disk consumption by log files {pull}25660[#25660] +* Use LRU set to reduce repeat deprecation messages {pull}25474[#25474] (issue: {issue}25457[#25457]) + +Mapping:: +* Better validation of `copy_to`. {pull}25983[#25983] +* Optimize `terms` queries on `ip` addresses to use a `PointInSetQuery` whenever possible. {pull}25669[#25669] (issue: {issue}25667[#25667]) + +Network:: +* Move TransportStats accounting into TcpTransport {pull}25251[#25251] +* Simplify connection closing and cleanups in TcpTransport {pull}25250[#25250] +* Disable the Netty recycler in the client {pull}24793[#24793] (issues: {issue}22452[#22452], {issue}24721[#24721]) +* Remove Netty logging hack {pull}24653[#24653] (issues: {issue}24469[#24469], {issue}5624[#5624], {issue}6568[#6568], {issue}6696[#6696]) + +Packaging:: +* Remove memlock suggestion from systemd service {pull}25979[#25979] +* Set address space limit in systemd service file {pull}25975[#25975] +* Version option should display if snapshot {pull}25970[#25970] +* Ignore JVM options before checking Java version {pull}25969[#25969] +* Also skip JAVA_TOOL_OPTIONS on Windows {pull}25968[#25968] +* Introduce elasticsearch-env for Windows {pull}25958[#25958] +* Introduce elasticsearch-env {pull}25815[#25815] (issue: {issue}20286[#20286]) +* Stop exporting HOSTNAME from scripts {pull}25807[#25807] + +Parent/Child:: +* Remove ParentJoinFieldSubFetchPhase {pull}25550[#25550] (issue: {issue}25363[#25363]) +* Support parent id being specified as number in the _source {pull}25547[#25547] + +Plugin Lang Painless:: +* Allow Custom Whitelists in Painless {pull}25557[#25557] +* Update Painless to Allow Augmentation from Any Class {pull}25360[#25360] +* Add Needs Methods to Painless Script Context Factories {pull}25267[#25267] +* Support Script Context Stateful Factory in Painless {pull}25233[#25233] +* Generate Painless Factory for Creating Script Instances {pull}25120[#25120] +* Update Painless to Use New Script Contexts {pull}25015[#25015] +* Optimize instance creation in LambdaBootstrap {pull}24618[#24618] + +Plugin Repository GCS:: +* GCS Repository: Add secure storage of credentials {pull}24697[#24697] + +Plugin Repository S3:: +* S3 Repository: Add back repository level credentials {pull}24609[#24609] + +Plugins:: +* Move tribe to a module {pull}25778[#25778] +* Plugins can register pre-configured char filters {pull}25000[#25000] (issue: {issue}23658[#23658]) +* Add purge option to remove plugin CLI {pull}24981[#24981] +* Allow plugins to register pre-configured tokenizers {pull}24751[#24751] (issues: 
{issue}24223[#24223], {issue}24572[#24572]) +* Move ReindexAction class to core {pull}24684[#24684] (issue: {issue}24578[#24578]) +* Make PreConfiguredTokenFilter harder to misuse {pull}24572[#24572] (issue: {issue}23658[#23658]) + +Query DSL:: +* Make slop optional when parsing `span_near` query {pull}25677[#25677] (issue: {issue}25642[#25642]) +* Require a field when a `seed` is provided to the `random_score` function. {pull}25594[#25594] (issue: {issue}25240[#25240]) + +REST:: +* Refactor PathTrie and RestController to use a single trie for all methods {pull}25459[#25459] (issue: {issue}24437[#24437]) +* Make ObjectParser support string to boolean conversion {pull}24668[#24668] (issue: {issue}21802[#21802]) + +Recovery:: +* Goodbye, Translog Views {pull}25962[#25962] +* Disallow multiple concurrent recovery attempts for same target shard {pull}25428[#25428] +* Live primary-replica resync (no rollback) {pull}24841[#24841] (issue: {issue}10708[#10708]) + +Scripting:: +* Scripting: Rename SearchScript.needsScores to needs_score {pull}25235[#25235] +* Scripting: Add optional context parameter to put stored script requests {pull}25014[#25014] +* Add New Security Script Settings {pull}24637[#24637] (issue: {issue}24532[#24532]) + +Search:: +* Rewrite search requests on the coordinating nodes {pull}25814[#25814] (issue: {issue}25791[#25791]) +* Ensure query resources are fetched asynchronously during rewrite {pull}25791[#25791] +* Introduce a new Rewriteable interface to streamline rewriting {pull}25788[#25788] +* Reduce the scope of `QueryRewriteContext` {pull}25787[#25787] +* Reduce the overhead of timeouts and low-level search cancellation. {pull}25776[#25776] +* Reduce profiling overhead. {pull}25772[#25772] (issue: {issue}24799[#24799]) +* Prevent `can_match` requests from sending to incompatible nodes {pull}25705[#25705] (issue: {issue}25704[#25704]) +* Add a shard filter search phase to pre-filter shards based on query rewriting {pull}25658[#25658] +* Ensure we rewrite common queries to `match_none` if possible {pull}25650[#25650] +* Limit the number of concurrent shard requests per search request {pull}25632[#25632] +* Add cluster name validation to RemoteClusterConnection {pull}25568[#25568] +* Speed up sorted scroll when the index sort matches the search sort {pull}25138[#25138] (issue: {issue}6720[#6720]) +* Leverage scorerSupplier when applicable. 
{pull}25109[#25109] +* Add Cross Cluster Search support for scroll searches {pull}25094[#25094] +* Track EWMA of task execution time in search threadpool executor {pull}24989[#24989] (issue: {issue}24915[#24915]) +* Query range fields by doc values when they are expected to be more efficient than points {pull}24823[#24823] (issue: {issue}24314[#24314]) +* Search: Fairer balancing when routing searches by session ID {pull}24671[#24671] (issue: {issue}24642[#24642]) + +Sequence IDs:: +* Move primary term from ReplicationRequest to ConcreteShardRequest {pull}25822[#25822] +* Add reason to global checkpoint updates on replica {pull}25612[#25612] (issue: {issue}10708[#10708]) +* Introduce primary/replica mode for GlobalCheckPointTracker {pull}25468[#25468] +* Throw back replica local checkpoint on new primary {pull}25452[#25452] (issues: {issue}10708[#10708], {issue}25355[#25355]) +* Update global checkpoint when increasing primary term on replica {pull}25422[#25422] (issues: {issue}10708[#10708], {issue}25355[#25355]) +* Enable a long translog retention policy by default {pull}25294[#25294] (issues: {issue}10708[#10708], {issue}25147[#25147]) +* Introduce primary context {pull}25122[#25122] (issues: {issue}10708[#10708], {issue}25355[#25355]) +* Block older operations on primary term transition {pull}24779[#24779] (issue: {issue}10708[#10708]) + +Settings:: +* Add disk threshold settings validation {pull}25600[#25600] (issue: {issue}25560[#25560]) +* Enable cross-setting validation {pull}25560[#25560] (issue: {issue}25541[#25541]) +* Validate `transport.profiles.*` settings {pull}25508[#25508] +* Cleanup network / transport related settings {pull}25489[#25489] +* Emit settings deprecation logging at most once {pull}25457[#25457] +* IndexMetaData: Introduce internal format index setting {pull}25292[#25292] + +Snapshot/Restore:: +* Improves snapshot logging and snapshot deletion error handling {pull}25264[#25264] + +Stats:: +* Update `IndexShard#refreshMetric` via a `ReferenceManager.RefreshListener` {pull}25083[#25083] (issues: {issue}24806[#24806], {issue}25052[#25052]) + +Translog:: +* Translog file recovery should not rely on lucene commits {pull}25005[#25005] (issue: {issue}24950[#24950]) + +[float] +=== Bug Fixes + +Aggregations:: +* Fixes array out of bounds for value count agg {pull}26038[#26038] (issue: {issue}17379[#17379]) +* Aggregations bug: Significant_text fails on arrays of text. 
{pull}25030[#25030] (issue: {issue}25029[#25029]) + +Aliases:: +* mget with an alias shouldn't ignore alias routing {pull}25697[#25697] (issue: {issue}25696[#25696]) +* GET aliases should 404 if aliases are missing {pull}25043[#25043] (issue: {issue}24644[#24644]) + +Analysis:: +* Pre-configured shingle filter should disable graph analysis {pull}25853[#25853] (issue: {issue}25555[#25555]) + +Circuit Breakers:: +* Checks the circuit breaker before allocating bytes for a new big array {pull}25010[#25010] (issue: {issue}24790[#24790]) + +Core:: +* Release operation permit on thread-pool rejection {pull}25930[#25930] (issue: {issue}25863[#25863]) +* Node should start up despite a lingering `.es_temp_file` {pull}21210[#21210] (issue: {issue}21007[#21007]) + +Discovery:: +* MasterNodeChangePredicate should use the node instance to detect master change {pull}25877[#25877] (issue: {issue}25471[#25471]) + +Engine:: +* Engine - do not index operations with seq# lower than the local checkpoint into lucene {pull}25827[#25827] (issue: {issue}25592[#25592]) + +Geo:: +* Fix typo in GeoUtils#isValidLongitude {pull}25121[#25121] + +Highlighting:: +* FastVectorHighlighter should not cache the field query globally {pull}25197[#25197] (issue: {issue}25171[#25171]) +* Highlighters: Fix MultiPhrasePrefixQuery rewriting {pull}25103[#25103] (issue: {issue}25088[#25088]) + +Index APIs:: +* Shrink API should ignore templates {pull}25380[#25380] (issue: {issue}25035[#25035]) +* Rollover max docs should only count primaries {pull}24977[#24977] (issue: {issue}24217[#24217]) + +Ingest:: +* Sort Processor does not have proper behavior with targetField {pull}25237[#25237] (issue: {issue}24133[#24133]) +* fix grok's pattern parsing to validate pattern names in expression {pull}25063[#25063] (issue: {issue}22831[#22831]) + +Inner Hits:: +* When fetching nested inner hits only access stored fields when needed {pull}25864[#25864] + +Internal:: +* Fix BytesReferenceStreamInput#skip with offset {pull}25634[#25634] +* Fix race condition in RemoteClusterConnection node supplier {pull}25432[#25432] +* Initialise empty lists in BaseTaskResponse constructor {pull}25290[#25290] +* Extract a common base class for scroll executions {pull}24979[#24979] (issue: {issue}16555[#16555]) +* Obey lock order if working with store to get metadata snapshots {pull}24787[#24787] (issue: {issue}24481[#24481]) +* Fix Version based BWC and set correct minCompatVersion {pull}24732[#24732] +* Fix `_field_caps` serialization in order to support cross cluster search {pull}24722[#24722] +* Avoid race when shutting down controller processes {pull}24579[#24579] + +Mapping:: +* Fix parsing of ip range queries. 
{pull}25768[#25768] (issue: {issue}25636[#25636]) +* Disable date field mapping changing {pull}25285[#25285] (issue: {issue}25271[#25271]) +* Correctly enable _all for older 5.x indices {pull}25087[#25087] (issue: {issue}25068[#25068]) +* token_count datatype should handle null value {pull}25046[#25046] (issue: {issue}24928[#24928]) +* keep _parent field while updating child type mapping {pull}24407[#24407] (issue: {issue}23381[#23381]) + +More Like This:: +* Pass over _routing value with more_like_this items to be retrieved {pull}24679[#24679] (issue: {issue}23699[#23699]) + +Nested Docs:: +* In case of a single type the _id field should be added to the nested document instead of _uid field {pull}25149[#25149] + +Network:: +* Ensure pending transport handlers are invoked for all channel failures {pull}25150[#25150] +* Notify onConnectionClosed rather than onNodeDisconnect to prune transport handlers {pull}24639[#24639] (issues: {issue}24557[#24557], {issue}24575[#24575], {issue}24632[#24632]) + +Packaging:: +* Exit Windows scripts promptly on failure {pull}25959[#25959] +* Pass config path as a system property {pull}25943[#25943] +* ES_HOME needs to be made absolute before attempt at traversal {pull}25865[#25865] +* Fix elasticsearch-keystore handling of path.conf {pull}25811[#25811] +* Stop disabling explicit GC {pull}25759[#25759] +* Avoid failing install if system-sysctl is masked {pull}25657[#25657] (issue: {issue}24234[#24234]) +* Get short path name for native controllers {pull}25344[#25344] +* When stopping via systemd only kill the JVM, not its control group {pull}25195[#25195] +* remove remaining references to scripts directory {pull}24771[#24771] +* Handle parentheses in batch file path {pull}24731[#24731] (issue: {issue}24712[#24712]) + +Parent/Child:: +* The default _parent field should not try to load global ordinals {pull}25851[#25851] (issue: {issue}25849[#25849]) + +Percolator:: +* Fix range queries with date range based on current time in percolator queries. {pull}24666[#24666] (issue: {issue}23921[#23921]) + +Plugin Lang Painless:: +* Painless: allow doubles to be casted to longs. {pull}25936[#25936] + +Plugin Repository Azure:: +* Make calls to CloudBlobContainer#exists privileged {pull}25937[#25937] (issue: {issue}25931[#25931]) + +Plugin Repository GCS:: +* Ensure that gcs client creation is privileged {pull}25938[#25938] (issue: {issue}25932[#25932]) + +Plugin Repository HDFS:: +* Upgrading HDFS Repository Plugin to use HDFS 2.8.1 Client {pull}25497[#25497] (issue: {issue}25450[#25450]) + +Plugin Repository S3:: +* Avoid SecurityException in repository-S3 on DefaultS3OutputStream.flush() {pull}25254[#25254] (issue: {issue}25192[#25192]) + +Plugins:: +* X-Pack plugin download fails on Windows desktop {pull}24570[#24570] + +Query DSL:: +* SpanNearQueryBuilder should return the inner clause when a single clause is provided {pull}25856[#25856] (issue: {issue}25630[#25630]) +* Refactor field expansion for match, multi_match and query_string query {pull}25726[#25726] (issues: {issue}25551[#25551], {issue}25556[#25556]) +* WrapperQueryBuilder should also rewrite the parsed query {pull}25480[#25480] + +REST:: +* Fix handling of invalid error trace parameter {pull}25785[#25785] (issue: {issue}25774[#25774]) +* Fix handling of exceptions thrown on HEAD requests {pull}25172[#25172] (issue: {issue}21125[#21125]) +* Fixed NPEs caused by requests without content. 
{pull}23497[#23497] (issue: {issue}24701[#24701]) +* Fix get mappings HEAD requests {pull}23192[#23192] (issue: {issue}21125[#21125]) + +Recovery:: +* Close translog view after primary-replica resync {pull}25862[#25862] (issue: {issue}24841[#24841]) + +Reindex API:: +* Reindex: don't duplicate _source parameter {pull}24629[#24629] (issue: {issue}24628[#24628]) +* Add qa module that tests reindex-from-remote against pre-5.0 versions of Elasticsearch {pull}24561[#24561] (issues: {issue}23828[#23828], {issue}24520[#24520]) + +Search:: +* Caching a MinDocQuery can lead to wrong results. {pull}25909[#25909] +* Fix random score generation when no seed is provided. {pull}25908[#25908] +* Merge FunctionScoreQuery and FiltersFunctionScoreQuery {pull}25889[#25889] (issues: {issue}15709[#15709], {issue}23628[#23628]) +* Respect cluster alias in `_index` aggs and queries {pull}25885[#25885] (issue: {issue}25606[#25606]) +* First increment shard stats before notifying and potentially sending response {pull}25818[#25818] +* Remove assertion about deviation when casting to a float. {pull}25806[#25806] (issue: {issue}25330[#25330]) +* Prevent skipping shards if a suggest builder is present {pull}25739[#25739] (issue: {issue}25658[#25658]) +* Ensure remote cluster alias is preserved in inner hits aggs {pull}25627[#25627] (issue: {issue}25606[#25606]) +* Do not search locally if remote index pattern resolves to no indices {pull}25436[#25436] (issue: {issue}25426[#25426]) +* Adds check for negative search request size {pull}25397[#25397] (issue: {issue}22530[#22530]) +* Make sure range queries are correctly profiled. {pull}25108[#25108] +* Fix RangeFieldMapper rangeQuery to properly handle relations {pull}24808[#24808] (issue: {issue}24744[#24744]) +* Fix ExpandSearchPhase when response contains no hits {pull}24688[#24688] (issue: {issue}24672[#24672]) + +Sequence IDs:: +* Fix pre-6.0 response to unknown replication actions {pull}25744[#25744] (issue: {issue}10708[#10708]) +* Track local checkpoint on primary immediately {pull}25434[#25434] (issues: {issue}10708[#10708], {issue}25355[#25355], {issue}25415[#25415]) +* Initialize max unsafe auto ID timestamp on shrink {pull}25356[#25356] (issues: {issue}10708[#10708], {issue}25355[#25355]) +* Use correct primary term for replicating NOOPs {pull}25128[#25128] +* Handle already closed while filling gaps {pull}25021[#25021] (issue: {issue}24925[#24925]) + +Settings:: +* Fix settings serialization to not serialize secure settings or not take the total size into account {pull}25323[#25323] +* Keystore CLI should use the AddFileKeyStoreCommand for files {pull}25298[#25298] +* Allow resetting settings that use an IP validator {pull}24713[#24713] (issue: {issue}24709[#24709]) + +Snapshot/Restore:: +* Snapshot/Restore: Ensure that shard failure reasons are correctly stored in CS {pull}25941[#25941] (issue: {issue}25878[#25878]) +* Output all empty snapshot info fields if in verbose mode {pull}25455[#25455] (issue: {issue}24477[#24477]) +* Remove redundant and broken MD5 checksum from repository-s3 {pull}25270[#25270] (issue: {issue}25269[#25269]) +* Consolidates the logic for cleaning up snapshots on master election {pull}24894[#24894] (issue: {issue}24605[#24605]) +* Removes completed snapshot from cluster state on master change {pull}24605[#24605] (issue: {issue}24452[#24452]) + +Stats:: +* _nodes/stats should not fail due to concurrent AlreadyClosedException {pull}25016[#25016] (issue: {issue}23099[#23099]) + +Suggesters:: +* Context suggester should filter doc 
values field {pull}25858[#25858] (issue: {issue}25404[#25404]) + +[float] +=== Regressions + +Highlighting:: +* Fix Fast Vector Highlighter NPE on match phrase prefix {pull}25116[#25116] (issue: {issue}25088[#25088]) + +Search:: +* Always use DisjunctionMaxQuery to build cross fields disjunction {pull}25115[#25115] (issue: {issue}23966[#23966]) + +//[float] +//=== Known Issues + +[float] +=== Upgrades + +Network:: +* Upgrade to Netty 4.1.13.Final {pull}25581[#25581] (issues: {issue}24729[#24729], {issue}6866[#6866]) +* Upgrade to Netty 4.1.11.Final {pull}24652[#24652] + +Upgrade API:: +* Improve stability and logging of TemplateUpgradeServiceIT tests {pull}25386[#25386] (issue: {issue}25382[#25382]) + +[[release-notes-6.0.0-alpha2]] +== {es} version 6.0.0-alpha2 + +[float] +[[breaking-6.0.0-alpha2]] +=== Breaking Changes + +CRUD:: +* If the index does not exist, delete document will not auto create it {pull}24518[#24518] (issue: {issue}15425[#15425]) + +Plugin Analysis ICU:: +* Upgrade icu4j to latest version {pull}24821[#24821] + +Plugin Repository S3:: +* Remove deprecated S3 settings {pull}24445[#24445] + +Scripting:: +* Remove script access to term statistics {pull}19462[#19462] (issue: {issue}19359[#19359]) + +[float] +=== Breaking Java Changes + +Aggregations:: +* Make Terms.Bucket an interface rather than an abstract class {pull}24492[#24492] +* Compound order for histogram aggregations {pull}22343[#22343] (issues: {issue}14771[#14771], {issue}20003[#20003], {issue}23613[#23613]) + +Plugins:: +* Drop name from TokenizerFactory {pull}24869[#24869] + +[float] +=== Deprecations + +Settings:: +* Deprecate settings in .yml and .json {pull}24059[#24059] (issue: {issue}19391[#19391]) + +[float] +=== New Features + +Aggregations:: +* SignificantText aggregation - like significant_terms, but for text {pull}24432[#24432] (issue: {issue}23674[#23674]) + +Internal:: +* Automatically adjust search threadpool queue_size {pull}23884[#23884] (issue: {issue}3890[#3890]) + +Mapping:: +* Add new ip_range field type {pull}24433[#24433] + +Plugin Analysis ICU:: +* Add ICUCollationFieldMapper {pull}24126[#24126] + +[float] +=== Enhancements + +Core:: +* Improve bootstrap checks error messages {pull}24548[#24548] + +Engine:: +* Move the IndexDeletionPolicy to be engine internal {pull}24930[#24930] (issue: {issue}10708[#10708]) + +Internal:: +* Add assertions enabled helper {pull}24834[#24834] + +Java High Level REST Client:: +* Add doc_count to ParsedMatrixStats {pull}24952[#24952] (issue: {issue}24776[#24776]) +* Add fromXContent method to ClearScrollResponse {pull}24909[#24909] +* ClearScrollRequest to implement ToXContentObject {pull}24907[#24907] +* SearchScrollRequest to implement ToXContentObject {pull}24906[#24906] (issue: {issue}3889[#3889]) +* Add aggs parsers for high level REST Client {pull}24824[#24824] (issues: {issue}23965[#23965], {issue}23973[#23973], {issue}23974[#23974], {issue}24085[#24085], {issue}24160[#24160], {issue}24162[#24162], {issue}24182[#24182], {issue}24183[#24183], {issue}24208[#24208], {issue}24213[#24213], {issue}24239[#24239], {issue}24284[#24284], {issue}24312[#24312], {issue}24330[#24330], {issue}24365[#24365], {issue}24371[#24371], {issue}24442[#24442], {issue}24521[#24521], {issue}24524[#24524], {issue}24564[#24564], {issue}24583[#24583], {issue}24589[#24589], {issue}24648[#24648], {issue}24667[#24667], {issue}24675[#24675], {issue}24682[#24682], {issue}24700[#24700], {issue}24706[#24706], 
{issue}24717[#24717], {issue}24720[#24720], {issue}24738[#24738], {issue}24746[#24746], {issue}24789[#24789], {issue}24791[#24791], {issue}24794[#24794], {issue}24796[#24796], {issue}24822[#24822]) + +Mapping:: +* Identify documents by their `_id`. {pull}24460[#24460] + +Packaging:: +* Set number of processes in systemd unit file {pull}24970[#24970] (issue: {issue}20874[#20874]) + +Plugin Lang Painless:: +* Make Painless Compiler Use an Instance Per Context {pull}24972[#24972] +* Make PainlessScript An Interface {pull}24966[#24966] + +Recovery:: +* Introduce primary context {pull}25031[#25031] (issue: {issue}10708[#10708]) + +Scripting:: +* Add StatefulFactoryType as optional intermediate factory in script contexts {pull}24974[#24974] (issue: {issue}20426[#20426]) +* Make contexts available to ScriptEngine construction {pull}24896[#24896] +* Make ScriptEngine.compile generic on the script context {pull}24873[#24873] +* Add instance and compiled classes to script contexts {pull}24868[#24868] + +Search:: +* Eliminate array access in tight loops when profiling is enabled. {pull}24959[#24959] +* Support Multiple Inner Hits on a Field Collapse Request {pull}24517[#24517] +* Expand cross cluster search indices for search requests to the concrete index or to its aliases {pull}24502[#24502] + +Search Templates:: +* Add max concurrent searches to multi template search {pull}24255[#24255] (issues: {issue}20912[#20912], {issue}21907[#21907]) + +Sequence IDs:: +* Fill gaps on primary promotion {pull}24945[#24945] (issue: {issue}10708[#10708]) +* Introduce clean transition on primary promotion {pull}24925[#24925] (issue: {issue}10708[#10708]) +* Guarantee that translog generations are seqNo conflict free {pull}24825[#24825] (issues: {issue}10708[#10708], {issue}24779[#24779]) +* Inline global checkpoints {pull}24513[#24513] (issue: {issue}10708[#10708]) + +Snapshot/Restore:: +* Enhances get snapshots API to allow retrieving repository index only {pull}24477[#24477] (issue: {issue}24288[#24288]) + +[float] +=== Bug Fixes + +Aggregations:: +* Terms aggregation should remap global ordinal buckets when a sub-aggregator is used to sort the terms {pull}24941[#24941] (issue: {issue}24788[#24788]) +* Correctly set doc_count when MovAvg "predicts" values on existing buckets {pull}24892[#24892] (issue: {issue}24327[#24327]) +* DateHistogram: Fix `extended_bounds` with `offset` {pull}23789[#23789] (issue: {issue}23776[#23776]) +* Fix ArrayIndexOutOfBoundsException when no ranges are specified in the query {pull}23241[#23241] (issue: {issue}22881[#22881]) + +Analysis:: +* PatternAnalyzer should lowercase wildcard queries when `lowercase` is true. 
{pull}24967[#24967] + +Cache:: +* fix bug of weight computation {pull}24856[#24856] + +Core:: +* Fix cache expire after access {pull}24546[#24546] + +Index APIs:: +* Validates updated settings on closed indices {pull}24487[#24487] (issue: {issue}23787[#23787]) + +Ingest:: +* Fix floating-point error when DateProcessor parses UNIX {pull}24947[#24947] +* add option for _ingest.timestamp to use new ZonedDateTime (5.x backport) {pull}24030[#24030] (issues: {issue}23168[#23168], {issue}23174[#23174]) + +Inner Hits:: +* Fix Source filtering in new field collapsing feature {pull}24068[#24068] (issue: {issue}24063[#24063]) + +Internal:: +* Ensure remote cluster is connected before fetching `_field_caps` {pull}24845[#24845] (issue: {issue}24763[#24763]) + +Network:: +* Fix error message if an incompatible node connects {pull}24884[#24884] + +Plugins:: +* Fix plugin installation permissions {pull}24527[#24527] (issue: {issue}24480[#24480]) + +Scroll:: +* Fix single shard scroll within a cluster with nodes in version `>= 5.3` and `<= 5.3` {pull}24512[#24512] + +Search:: +* Fix script field sort returning Double.MAX_VALUE for all documents {pull}24942[#24942] (issue: {issue}24940[#24940]) +* Compute the took time of the query after the expand phase of field collapsing {pull}24902[#24902] (issue: {issue}24900[#24900]) + +Sequence IDs:: +* Handle primary failure handling replica response {pull}24926[#24926] (issue: {issue}24935[#24935]) + +Snapshot/Restore:: +* Fix inefficient (worst case exponential) loading of snapshot repository {pull}24510[#24510] (issue: {issue}24509[#24509]) + +Stats:: +* Avoid double decrement on current query counter {pull}24922[#24922] (issues: {issue}22996[#22996], {issue}24872[#24872]) +* Adjust available and free bytes to be non-negative on huge FSes {pull}24911[#24911] (issues: {issue}23093[#23093], {issue}24453[#24453]) + +Suggesters:: +* Fix context suggester to read values from keyword type field {pull}24200[#24200] (issue: {issue}24129[#24129]) + +//[float] +//=== Regressions + +//[float] +//=== Known Issues + +[[release-notes-6.0.0-alpha1]] +== {es} version 6.0.0-alpha1 + +[float] +[[breaking-6.0.0-alpha1]] +=== Breaking Changes + + +Allocation:: +* Remove `cluster.routing.allocation.snapshot.relocation_enabled` setting {pull}20994[#20994] + +Analysis:: +* Removing query-string parameters in `_analyze` API {pull}20704[#20704] (issue: {issue}20246[#20246]) + +CAT API:: +* Write -1 on unbounded queue in cat thread pool {pull}21342[#21342] (issue: {issue}21187[#21187]) + +CRUD:: +* Disallow `VersionType.FORCE` for GetRequest {pull}21079[#21079] (issue: {issue}20995[#20995]) +* Disallow `VersionType.FORCE` versioning for 6.x indices {pull}20995[#20995] (issue: {issue}20377[#20377]) + +Cluster:: +* No longer allow cluster name in data path {pull}20433[#20433] (issue: {issue}20391[#20391]) + +Core:: +* Simplify file store {pull}24402[#24402] (issue: {issue}24390[#24390]) +* Make boolean conversion strict {pull}22200[#22200] +* Remove the `default` store type. {pull}21616[#21616] +* Remove store throttling. 
{pull}21573[#21573] + +Geo:: +* Remove deprecated geo search features {pull}22876[#22876] +* Reduce GeoDistance Insanity {pull}19846[#19846] + +Index APIs:: +* Open/Close index api to allow_no_indices by default {pull}24401[#24401] (issues: {issue}24031[#24031], {issue}24341[#24341]) +* Remove support for controversial `ignore_unavailable` and `allow_no_indices` from indices exists api {pull}20712[#20712] + +Index Templates:: +* Allows multiple patterns to be specified for index templates {pull}21009[#21009] (issue: {issue}20690[#20690]) + +Java API:: +* Enforce Content-Type requirement on the rest layer and remove deprecated methods {pull}23146[#23146] (issue: {issue}19388[#19388]) + +Mapping:: +* Enforce at most one type. {pull}24428[#24428] (issue: {issue}24317[#24317]) +* Disallow `include_in_all` for 6.0+ indices {pull}22970[#22970] (issue: {issue}22923[#22923]) +* Disable _all by default, disallow configuring _all on 6.0+ indices {pull}22144[#22144] (issues: {issue}19784[#19784], {issue}20925[#20925], {issue}21341[#21341]) +* Throw an exception on unrecognized "match_mapping_type" {pull}22090[#22090] (issue: {issue}17285[#17285]) + +Network:: +* Remove blocking TCP clients and servers {pull}22639[#22639] +* Remove `modules/transport_netty_3` in favor of `netty_4` {pull}21590[#21590] +* Remove LocalTransport in favor of MockTcpTransport {pull}20695[#20695] + +Packaging:: +* Remove customization of ES_USER and ES_GROUP {pull}23989[#23989] (issue: {issue}23848[#23848]) + +Percolator:: +* Remove deprecated percolate and mpercolate apis {pull}22331[#22331] + +Plugin Delete By Query:: +* Require explicit query in _delete_by_query API {pull}23632[#23632] (issue: {issue}23629[#23629]) + +Plugin Discovery EC2:: +* Ec2 Discovery: Cleanup deprecated settings {pull}24150[#24150] +* Discovery EC2: Remove region setting {pull}23991[#23991] (issue: {issue}22758[#22758]) +* AWS Plugins: Remove signer type setting {pull}23984[#23984] (issue: {issue}22599[#22599]) + +Plugin Lang JS:: +* Remove lang-python and lang-javascript {pull}20734[#20734] (issue: {issue}20698[#20698]) + +Plugin Mapper Attachment:: +* Remove mapper attachments plugin {pull}20416[#20416] (issue: {issue}18837[#18837]) + +Plugin Repository Azure:: +* Remove global `repositories.azure` settings {pull}23262[#23262] (issues: {issue}22800[#22800], {issue}22856[#22856]) +* Remove auto creation of container for azure repository {pull}22858[#22858] (issue: {issue}22857[#22857]) + +Plugin Repository S3:: +* S3 Repository: Cleanup deprecated settings {pull}24097[#24097] +* S3 Repository: Remove region setting {pull}22853[#22853] (issue: {issue}22758[#22758]) +* S3 Repository: Remove bucket auto create {pull}22846[#22846] (issue: {issue}22761[#22761]) +* S3 Repository: Remove env var and sysprop credentials support {pull}22842[#22842] + +Query DSL:: +* Remove deprecated `minimum_number_should_match` in BoolQueryBuilder {pull}22416[#22416] +* Remove support for empty queries {pull}22092[#22092] (issue: {issue}17624[#17624]) +* Remove deprecated query names: in, geo_bbox, mlt, fuzzy_match and match_fuzzy {pull}21852[#21852] +* The `terms` query should always map to a Lucene `TermsQuery`. 
{pull}21786[#21786] +* Be strict when parsing values searching for booleans {pull}21555[#21555] (issue: {issue}21545[#21545]) +* Remove collect payloads parameter {pull}20385[#20385] + +REST:: +* Remove ldjson support and document ndjson for bulk/msearch {pull}23049[#23049] (issue: {issue}23025[#23025]) +* Enable strict duplicate checks for all XContent types {pull}22225[#22225] (issues: {issue}19614[#19614], {issue}22073[#22073]) +* Enable strict duplicate checks for JSON content {pull}22073[#22073] (issue: {issue}19614[#19614]) +* Remove lenient stats parsing {pull}21417[#21417] (issues: {issue}20722[#20722], {issue}21410[#21410]) +* Remove allow unquoted JSON {pull}20388[#20388] (issues: {issue}17674[#17674], {issue}17801[#17801]) +* Remove FORCE version_type {pull}20377[#20377] (issue: {issue}19769[#19769]) + +Scripting:: +* Make dates be ReadableDateTimes in scripts {pull}22948[#22948] (issue: {issue}22875[#22875]) +* Remove groovy scripting language {pull}21607[#21607] + +Search:: +* ProfileResult and CollectorResult should print machine readable timing information {pull}22561[#22561] +* Remove indices query {pull}21837[#21837] (issue: {issue}17710[#17710]) +* Remove ignored type parameter in search_shards api {pull}21688[#21688] + +Sequence IDs:: +* Change certain replica failures not to fail the replica shard {pull}22874[#22874] (issue: {issue}10708[#10708]) + +Shadow Replicas:: +* Remove shadow replicas {pull}23906[#23906] (issue: {issue}22024[#22024]) + +[float] +=== Breaking Java Changes + +Java API:: +* Java api: ActionRequestBuilder#execute to return a PlainActionFuture {pull}24415[#24415] (issues: {issue}24412[#24412], {issue}9201[#9201]) + +Network:: +* Simplify TransportAddress {pull}20798[#20798] + +[float] +=== Deprecations + +Index Templates:: +* Restore deprecation warning for invalid match_mapping_type values {pull}22304[#22304] + +Internal:: +* Deprecate XContentType auto detection methods in XContentFactory {pull}22181[#22181] (issue: {issue}19388[#19388]) + +[float] +=== New Features + +Core:: +* Enable index-time sorting {pull}24055[#24055] (issue: {issue}6720[#6720]) + +[float] +=== Enhancements + +Aggregations:: +* Agg builder accessibility fixes {pull}24323[#24323] +* Remove support for the include/pattern syntax. 
{pull}23141[#23141] (issue: {issue}22933[#22933]) +* Promote longs to doubles when a terms agg mixes decimal and non-decimal numbers {pull}22449[#22449] (issue: {issue}22232[#22232]) + +Analysis:: +* Match- and MultiMatchQueryBuilder should only allow setting analyzer on string values {pull}23684[#23684] (issue: {issue}21665[#21665]) + +Bulk:: +* Simplify bulk request execution {pull}20109[#20109] + +CRUD:: +* Added validation for upsert request {pull}24282[#24282] (issue: {issue}16671[#16671]) + +Cluster:: +* Separate publishing from applying cluster states {pull}24236[#24236] +* Adds cluster state size to /_cluster/state response {pull}23440[#23440] (issue: {issue}3415[#3415]) + +Core:: +* Remove connect SocketPermissions from core {pull}22797[#22797] +* Add repository-url module and move URLRepository {pull}22752[#22752] (issue: {issue}22116[#22116]) +* Remove accept SocketPermissions from core {pull}22622[#22622] (issue: {issue}22116[#22116]) +* Move IfConfig.logIfNecessary call into bootstrap {pull}22455[#22455] (issue: {issue}22116[#22116]) +* Remove artificial default processors limit {pull}20874[#20874] (issue: {issue}20828[#20828]) +* Simplify write failure handling {pull}19105[#19105] (issue: {issue}20109[#20109]) + +Engine:: +* Fill missing sequence IDs up to max sequence ID when recovering from store {pull}24238[#24238] (issue: {issue}10708[#10708]) +* Use sequence numbers to identify out of order delivery in replicas & recovery {pull}24060[#24060] (issue: {issue}10708[#10708]) +* Add replica ops with version conflict to translog {pull}22626[#22626] +* Clarify global checkpoint recovery {pull}21934[#21934] (issue: {issue}21254[#21254]) + +Internal:: +* Try to convince the JVM not to lose stacktraces {pull}24426[#24426] (issue: {issue}24376[#24376]) +* Make document write requests immutable {pull}23038[#23038] + +Java High Level REST Client:: +* Add info method to High Level Rest client {pull}23350[#23350] +* Add support for named xcontent parsers to high level REST client {pull}23328[#23328] +* Add BulkRequest support to High Level Rest client {pull}23312[#23312] +* Add UpdateRequest support to High Level Rest client {pull}23266[#23266] +* Add delete API to the High Level Rest Client {pull}23187[#23187] +* Add Index API to High Level Rest Client {pull}23040[#23040] +* Add get/exists method to RestHighLevelClient {pull}22706[#22706] +* Add fromxcontent methods to delete response {pull}22680[#22680] (issue: {issue}22229[#22229]) +* Add REST high level client gradle submodule and first simple method {pull}22371[#22371] + +Java REST Client:: +* Wrap rest httpclient with doPrivileged blocks {pull}22603[#22603] (issue: {issue}22116[#22116]) + +Mapping:: +* Date detection should not rely on a hardcoded set of characters. 
{pull}22171[#22171] (issue: {issue}1694[#1694]) + +Network:: +* Isolate SocketPermissions to Netty {pull}23057[#23057] +* Wrap netty accept/connect ops with doPrivileged {pull}22572[#22572] (issue: {issue}22116[#22116]) +* Replace Socket, ServerSocket, and HttpServer usages in tests with mocksocket versions {pull}22287[#22287] (issue: {issue}22116[#22116]) + +Plugin Discovery EC2:: +* Read ec2 discovery address from aws instance tags {pull}22743[#22743] (issue: {issue}22566[#22566]) + +Plugin Repository HDFS:: +* Add doPrivilege blocks for socket connect ops in repository-hdfs {pull}22793[#22793] (issue: {issue}22116[#22116]) + +Plugins:: +* Add doPrivilege blocks for socket connect operations in plugins {pull}22534[#22534] (issue: {issue}22116[#22116]) + +Recovery:: +* Peer Recovery: remove maxUnsafeAutoIdTimestamp hand off {pull}24243[#24243] (issue: {issue}24149[#24149]) +* Introduce sequence-number-based recovery {pull}22484[#22484] (issue: {issue}10708[#10708]) + +Search:: +* Add parsing from xContent to Suggest {pull}22903[#22903] +* Add parsing from xContent to ShardSearchFailure {pull}22699[#22699] + +Sequence IDs:: +* Block global checkpoint advances when recovering {pull}24404[#24404] (issue: {issue}10708[#10708]) +* Add primary term to doc write response {pull}24171[#24171] (issue: {issue}10708[#10708]) +* Preserve multiple translog generations {pull}24015[#24015] (issue: {issue}10708[#10708]) +* Introduce translog generation rolling {pull}23606[#23606] (issue: {issue}10708[#10708]) +* Replicate write failures {pull}23314[#23314] +* Introduce sequence-number-aware translog {pull}22822[#22822] (issue: {issue}10708[#10708]) +* Introduce translog no-op {pull}22291[#22291] (issue: {issue}10708[#10708]) +* Tighten sequence numbers recovery {pull}22212[#22212] (issue: {issue}10708[#10708]) +* Add BWC layer to seq no infra and enable BWC tests {pull}22185[#22185] (issue: {issue}21670[#21670]) +* Add internal _primary_term doc values field, fix _seq_no indexing {pull}21637[#21637] (issues: {issue}10708[#10708], {issue}21480[#21480]) +* Add global checkpoint to translog checkpoints {pull}21254[#21254] +* Sequence numbers commit data for Lucene uses Iterable interface {pull}20793[#20793] (issue: {issue}10708[#10708]) +* Simplify GlobalCheckpointService and properly hook it for cluster state updates {pull}20720[#20720] + +Stats:: +* Expose disk usage estimates in nodes stats {pull}22081[#22081] (issue: {issue}8686[#8686]) + +Store:: +* Remove support for lucene versions without checksums {pull}24021[#24021] + +Suggesters:: +* Remove deprecated _suggest endpoint {pull}22203[#22203] (issue: {issue}20305[#20305]) + +Task Manager:: +* Add descriptions to bulk tasks {pull}22059[#22059] (issue: {issue}21768[#21768]) + +[float] +=== Bug Fixes + +Ingest:: +* Remove support for Visio and potm files {pull}22079[#22079] (issue: {issue}22077[#22077]) + +Inner Hits:: +* If size / offset are out of bounds just do a plain count {pull}20556[#20556] (issue: {issue}20501[#20501]) + +Internal:: +* Fix handling of document failure exception in InternalEngine {pull}22718[#22718] + +Plugin Ingest Attachment:: +* Add missing mime4j library {pull}22764[#22764] (issue: {issue}22077[#22077]) + +Plugin Repository S3:: +* Wrap getCredentials() in a doPrivileged() block {pull}23297[#23297] (issues: {issue}22534[#22534], {issue}23271[#23271]) + +Sequence IDs:: +* Avoid losing ops in file-based recovery {pull}22945[#22945] (issue: {issue}22484[#22484]) + +Snapshot/Restore:: +* Keep snapshot restore state and routing 
table in sync {pull}20836[#20836] (issue: {issue}19774[#19774]) + +Translog:: +* Fix Translog.Delete serialization for sequence numbers {pull}22543[#22543] + +[float] +=== Regressions + +Bulk:: +* Only re-parse operation if a mapping update was needed {pull}23832[#23832] (issue: {issue}23665[#23665]) + +//[float] +//=== Known Issues + +[float] +=== Upgrades + +Core:: +* Upgrade to a Lucene 7 snapshot {pull}24089[#24089] (issues: {issue}23966[#23966], {issue}24086[#24086], {issue}24087[#24087], {issue}24088[#24088]) + +Plugin Ingest Attachment:: +* Update to Tika 1.14 {pull}21591[#21591] (issue: {issue}20390[#20390]) + +[[release-notes-6.0.0-alpha1-5x]] +== {es} version 6.0.0-alpha1 (Changes previously released in 5.x) + +The changes listed below were first released in the 5.x series. Changes +released for the first time in Elasticsearch 6.0.0-alpha1 are listed in +<<release-notes-6.0.0-alpha1>>. + +[float] +[[breaking-6.0.0-alpha1-5x]] +=== Breaking Changes + +Aliases:: +* Validate alias names the same as index names {pull}20771[#20771] (issue: {issue}20748[#20748]) + +CRUD:: +* Fixed naming inconsistency for fields/stored_fields in the APIs {pull}20166[#20166] (issues: {issue}18943[#18943], {issue}20155[#20155]) + +Core:: +* Add system call filter bootstrap check {pull}21940[#21940] +* Remove ignore system bootstrap checks {pull}20511[#20511] + +Internal:: +* `_flush` should block by default {pull}20597[#20597] (issue: {issue}20569[#20569]) + +Packaging:: +* Rename service.bat to elasticsearch-service.bat {pull}20496[#20496] (issue: {issue}17528[#17528]) + +Plugin Lang Painless:: +* Remove all date 'now' methods from Painless {pull}20766[#20766] (issue: {issue}20762[#20762]) + +Query DSL:: +* Fix name of `enabled_position_increments` {pull}22895[#22895] + +REST:: +* Change separator for shards preference {pull}20786[#20786] (issues: {issue}20722[#20722], {issue}20769[#20769]) + +Search:: +* Remove DFS_QUERY_AND_FETCH as a search type {pull}22787[#22787] + +Settings:: +* Remove support for default settings {pull}24093[#24093] (issues: {issue}23981[#23981], {issue}24052[#24052], {issue}24074[#24074]) + +[float] +=== Breaking Java Changes + +Aggregations:: +* Move getProperty method out of MultiBucketsAggregation.Bucket interface {pull}23988[#23988] +* Remove getProperty method from Aggregations interface and impl {pull}23972[#23972] +* Move getProperty method out of Aggregation interface {pull}23949[#23949] + +Allocation:: +* Cluster Explain API uses the allocation process to explain shard allocation decisions {pull}22182[#22182] (issues: {issue}20347[#20347], {issue}20634[#20634], {issue}21103[#21103], {issue}21662[#21662], {issue}21691[#21691]) + +Cluster:: +* Remove PROTO-based custom cluster state components {pull}22336[#22336] (issue: {issue}21868[#21868]) + +Core:: +* Remove ability to plug-in TransportService {pull}20505[#20505] + +Discovery:: +* Remove pluggability of ElectMasterService {pull}21031[#21031] + +Exceptions:: +* Remove `IndexTemplateAlreadyExistsException` and `IndexShardAlreadyExistsException` {pull}21539[#21539] (issue: {issue}21494[#21494]) +* Replace IndexAlreadyExistsException with ResourceAlreadyExistsException {pull}21494[#21494] + +Ingest:: +* Change type of ingest doc meta-data field 'TIMESTAMP' to `Date` {pull}22234[#22234] (issue: {issue}22074[#22074]) + +Internal:: +* Replace SearchExtRegistry with namedObject {pull}22492[#22492] +* Replace Suggesters with namedObject {pull}22491[#22491] +* Consolidate the last easy parser construction {pull}22095[#22095] +* Introduce 
XContentParser#namedObject {pull}22003[#22003] +* Pass executor name to request interceptor to support async intercept calls {pull}21089[#21089] +* Remove TransportService#registerRequestHandler leniency {pull}20469[#20469] (issue: {issue}20468[#20468]) + +Java API:: +* Fold InternalSearchHits and friends into their interfaces {pull}23042[#23042] + +Network:: +* Remove HttpServer and HttpServerAdapter in favor of a simple dispatch method {pull}22636[#22636] (issue: {issue}18482[#18482]) +* Unguice Transport and friends {pull}20526[#20526] + +Plugins:: +* Deguice rest handlers {pull}22575[#22575] +* Plugins: Replace Rest filters with RestHandler wrapper {pull}21905[#21905] +* Plugins: Remove support for onModule {pull}21416[#21416] +* Cleanup sub fetch phase extension point {pull}20382[#20382] + +Query DSL:: +* Resolve index names in indices_boost {pull}21393[#21393] (issue: {issue}4756[#4756]) + +Scripting:: +* Refactor ScriptType to be a Top-Level Class {pull}21136[#21136] + +Search:: +* Remove QUERY_AND_FETCH search type {pull}22996[#22996] +* Cluster search shards improvements: expose ShardId, adjust visibility of some members {pull}21752[#21752] + +[float] +=== Deprecations + +Java API:: +* Add BulkProcessor methods with XContentType parameter {pull}23078[#23078] (issue: {issue}22691[#22691]) +* Deprecate and remove "minimumNumberShouldMatch" in BoolQueryBuilder {pull}22403[#22403] + +Plugin Repository S3:: +* S3 Repository: Deprecate remaining `repositories.s3.*` settings {pull}24144[#24144] (issue: {issue}24143[#24143]) +* Deprecate specifying credentials through env vars, sys props, and remove profile files {pull}22567[#22567] (issues: {issue}21041[#21041], {issue}22479[#22479]) + +Query DSL:: +* Add deprecation logging message for 'fuzzy' query {pull}20993[#20993] (issue: {issue}15760[#15760]) + +REST:: +* Optionally require a valid content type for all rest requests with content {pull}22691[#22691] (issue: {issue}19388[#19388]) + +Scripting:: +* Change Namespace for Stored Script to Only Use Id {pull}22206[#22206] + +Shadow Replicas:: +* Add a deprecation notice to shadow replicas {pull}22647[#22647] (issue: {issue}22024[#22024]) + +Stats:: +* Deprecate _field_stats endpoint {pull}23914[#23914] + +[float] +=== New Features + +Aggregations:: +* Initial version of an adjacency matrix using the Filters aggregation {pull}22239[#22239] (issue: {issue}22169[#22169]) + +Analysis:: +* Adds pattern keyword marker filter support {pull}23600[#23600] (issue: {issue}4877[#4877]) +* Expose WordDelimiterGraphTokenFilter {pull}23327[#23327] (issue: {issue}23104[#23104]) +* Synonym Graph Support (LUCENE-6664) {pull}21517[#21517] +* Expose Lucenes Ukrainian analyzer {pull}21176[#21176] (issue: {issue}19433[#19433]) + +CAT API:: +* Provides a cat api endpoint for templates. {pull}20545[#20545] (issue: {issue}20467[#20467]) + +CRUD:: +* Allow an index to be partitioned with custom routing {pull}22274[#22274] (issue: {issue}21585[#21585]) + +Highlighting:: +* Integrate UnifiedHighlighter {pull}21621[#21621] (issue: {issue}21376[#21376]) + +Index APIs:: +* Add FieldCapabilities (_field_caps) API {pull}23007[#23007] (issue: {issue}22438[#22438]) + +Ingest:: +* introduce KV Processor in Ingest Node {pull}22272[#22272] (issue: {issue}22222[#22222]) + +Mapping:: +* Add the ability to set a normalizer on keyword fields. 
{pull}21919[#21919] (issue: {issue}18064[#18064]) +* Add RangeFieldMapper for numeric and date range types {pull}21002[#21002] (issue: {issue}20999[#20999]) + +Plugin Discovery File:: +* File-based discovery plugin {pull}20394[#20394] (issue: {issue}20323[#20323]) + +Query DSL:: +* Add "all fields" execution mode to simple_query_string query {pull}21341[#21341] (issues: {issue}19784[#19784], {issue}20925[#20925]) +* Add support for `quote_field_suffix` to `simple_query_string`. {pull}21060[#21060] (issue: {issue}18641[#18641]) +* Add "all field" execution mode to query_string query {pull}20925[#20925] (issue: {issue}19784[#19784]) + +Reindex API:: +* Add automatic parallelization support to reindex and friends {pull}20767[#20767] (issue: {issue}20624[#20624]) + +Search:: +* Introduce incremental reduction of TopDocs {pull}23946[#23946] +* Add federated cross cluster search capabilities {pull}22502[#22502] (issue: {issue}21473[#21473]) +* Add field collapsing for search request {pull}22337[#22337] (issue: {issue}21833[#21833]) + +Settings:: +* Add infrastructure for elasticsearch keystore {pull}22335[#22335] + +Similarities:: +* Adds boolean similarity to Elasticsearch {pull}23637[#23637] (issue: {issue}6731[#6731]) + +[float] +=== Enhancements + +Aggregations:: +* Add `count` to rest output of `geo_centroid` {pull}24387[#24387] (issue: {issue}24366[#24366]) +* Allow scripted metric agg to access `_score` {pull}24295[#24295] +* Add BucketMetricValue interface {pull}24188[#24188] +* Move aggs CommonFields and TYPED_KEYS_DELIMITER from InternalAggregation to Aggregation {pull}23987[#23987] +* Use ParseField for aggs CommonFields rather than String {pull}23717[#23717] +* Share XContent rendering code in terms aggs {pull}23680[#23680] +* Add unit tests for ParentToChildAggregator {pull}23305[#23305] (issue: {issue}22278[#22278]) +* First step towards incremental reduction of query responses {pull}23253[#23253] +* `value_type` is useful regardless of scripting. 
{pull}22160[#22160] (issue: {issue}20163[#20163]) +* Support for partitioning set of terms {pull}21626[#21626] (issue: {issue}21487[#21487]) +* Rescorer should be applied in the TopHits aggregation {pull}20978[#20978] (issue: {issue}19317[#19317]) + +Aliases:: +* Handle multiple aliases in _cat/aliases api {pull}23698[#23698] (issue: {issue}23661[#23661]) + +Allocation:: +* Trigger replica recovery restarts by master when primary relocation completes {pull}23926[#23926] (issue: {issue}23904[#23904]) +* Makes the same_shard host dynamically updatable {pull}23397[#23397] (issue: {issue}22992[#22992]) +* Include stale replica shard info when explaining an unassigned primary {pull}22826[#22826] +* Adds setting level to allocation decider explanations {pull}22268[#22268] (issue: {issue}21771[#21771]) +* Improves allocation decider decision explanation messages {pull}21771[#21771] +* Prepares allocator decision objects for use with the allocation explain API {pull}21691[#21691] +* Balance step in BalancedShardsAllocator for a single shard {pull}21103[#21103] +* Process more expensive allocation deciders last {pull}20724[#20724] (issue: {issue}12815[#12815]) +* Separates decision making from decision application in BalancedShardsAllocator {pull}20634[#20634] + +Analysis:: +* Support Keyword type in Analyze API {pull}23161[#23161] +* Expose FlattenGraphTokenFilter {pull}22643[#22643] +* Analyze API Position Length Support {pull}22574[#22574] +* Remove AnalysisService and reduce it to a simple name to analyzer mapping {pull}20627[#20627] (issues: {issue}19827[#19827], {issue}19828[#19828]) + +CAT API:: +* Adding built-in sorting capability to _cat apis. {pull}20658[#20658] (issue: {issue}16975[#16975]) +* Add health status parameter to cat indices API {pull}20393[#20393] + +CRUD:: +* Use correct block levels for TRA subclasses {pull}22224[#22224] +* Make index and delete operation execute as a single bulk item {pull}21964[#21964] + +Cache:: +* Do not cache term queries. {pull}21566[#21566] (issues: {issue}16031[#16031], {issue}20116[#20116]) +* Parse alias filters on the coordinating node {pull}20916[#20916] + +Circuit Breakers:: +* Closing a ReleasableBytesStreamOutput closes the underlying BigArray {pull}23941[#23941] +* Add used memory amount to CircuitBreakingException message (#22521) {pull}22693[#22693] (issue: {issue}22521[#22521]) +* Cluster Settings Updates should not trigger circuit breakers. 
{pull}20827[#20827] + +Cluster:: +* Extract a common base class to allow services to listen to remote cluster config updates {pull}24367[#24367] +* Prevent nodes from joining if newer indices exist in the cluster {pull}23843[#23843] +* Connect to new nodes concurrently {pull}22984[#22984] (issue: {issue}22828[#22828]) +* Keep NodeConnectionsService in sync with current nodes in the cluster state {pull}22509[#22509] +* Add a generic way of checking version before serializing custom cluster object {pull}22376[#22376] (issue: {issue}22313[#22313]) +* Add validation for supported index version on node join, restore, upgrade & open index {pull}21830[#21830] (issue: {issue}21670[#21670]) +* Let ClusterStateObserver only hold onto state that's needed for change detection {pull}21631[#21631] (issue: {issue}21568[#21568]) +* Cache successful shard deletion checks {pull}21438[#21438] +* Remove mutable status field from cluster state {pull}21379[#21379] +* Skip shard management code when updating cluster state on client/tribe nodes {pull}20731[#20731] +* Add clusterUUID to RestMainAction output {pull}20503[#20503] + +Core:: +* Regex upgrades {pull}24316[#24316] (issue: {issue}24226[#24226]) +* Detect remnants of path.data/default.path.data bug {pull}24099[#24099] (issues: {issue}23981[#23981], {issue}24052[#24052], {issue}24074[#24074], {issue}24093[#24093]) +* Await termination after shutting down executors {pull}23889[#23889] +* Add early-access check {pull}23743[#23743] (issue: {issue}23668[#23668]) +* Adapter action future should restore interrupts {pull}23618[#23618] (issue: {issue}23617[#23617]) +* Disable bootstrap checks for single-node discovery {pull}23598[#23598] (issues: {issue}23585[#23585], {issue}23595[#23595]) +* Enable explicitly enforcing bootstrap checks {pull}23585[#23585] (issue: {issue}21864[#21864]) +* Add equals/hashcode method to ReplicationResponse {pull}23215[#23215] +* Simplify ElasticsearchException rendering as a XContent {pull}22611[#22611] +* Remove setLocalNode from ClusterService and TransportService {pull}22608[#22608] +* Rename bootstrap.seccomp to bootstrap.system_call_filter {pull}22226[#22226] (issue: {issue}21940[#21940]) +* Cleanup random stats serialization code {pull}22223[#22223] +* Avoid corruption when deserializing booleans {pull}22152[#22152] +* Reduce memory pressure when sending large terms queries. 
{pull}21776[#21776] +* Install a security manager on startup {pull}21716[#21716] +* Log node ID on startup {pull}21673[#21673] +* Ensure source filtering automatons are only compiled once {pull}20857[#20857] (issue: {issue}20839[#20839]) +* Improve scheduling fairness when batching cluster state changes with equal priority {pull}20775[#20775] (issue: {issue}20768[#20768]) +* Add production warning for pre-release builds {pull}20674[#20674] +* Add serial collector bootstrap check {pull}20558[#20558] +* Do not log full bootstrap checks exception {pull}19989[#19989] + +Dates:: +* Improve error handling for epoch format parser with time zone (#22621) {pull}23689[#23689] + +Discovery:: +* Introduce single-node discovery {pull}23595[#23595] +* UnicastZenPing shouldn't ping the address of the local node {pull}23567[#23567] +* MasterFaultDetection can start after the initial cluster state has been processed {pull}23037[#23037] (issue: {issue}22828[#22828]) +* Simplify Unicast Zen Ping {pull}22277[#22277] (issues: {issue}19370[#19370], {issue}21739[#21739], {issue}22120[#22120], {issue}22194[#22194]) +* Prefer joining node with conflicting transport address when becoming master {pull}22134[#22134] (issues: {issue}22049[#22049], {issue}22120[#22120]) + +Engine:: +* Engine: store maxUnsafeAutoIdTimestamp in commit {pull}24149[#24149] +* Replace EngineClosedException with AlreadyClosedException {pull}22631[#22631] + +Exceptions:: +* Add BWC layer for Exceptions {pull}21694[#21694] (issue: {issue}21656[#21656]) + +Geo:: +* Optimize geo-distance sorting. {pull}20596[#20596] (issue: {issue}20450[#20450]) + +Highlighting:: +* Add support for fragment_length in the unified highlighter {pull}23431[#23431] +* Add BreakIteratorBoundaryScanner support {pull}23248[#23248] + +Index APIs:: +* Open and close index to honour allow_no_indices option {pull}24222[#24222] (issue: {issue}24031[#24031]) +* Wildcard cluster names for cross cluster search {pull}23985[#23985] (issue: {issue}23893[#23893]) +* Indexing: Add shard id to indexing operation listener {pull}22606[#22606] +* Better error when can't auto create index {pull}22488[#22488] (issues: {issue}21448[#21448], {issue}22435[#22435]) +* Add date-math support to `_rollover` {pull}20709[#20709] + +Ingest:: +* Lazy load the geoip databases {pull}23337[#23337] +* add `ignore_missing` flag to ingest plugins {pull}22273[#22273] +* Added ability to remove pipelines via wildcards (#22149) {pull}22191[#22191] (issue: {issue}22149[#22149]) +* Enables the ability to inject serialized json fields into root of document {pull}22179[#22179] (issue: {issue}21898[#21898]) +* compile ScriptProcessor inline scripts when creating ingest pipelines {pull}21858[#21858] (issue: {issue}21842[#21842]) +* add `ignore_missing` option to SplitProcessor {pull}20982[#20982] (issues: {issue}19995[#19995], {issue}20840[#20840]) +* add ignore_missing option to convert,trim,lowercase,uppercase,grok,rename {pull}20194[#20194] (issue: {issue}19995[#19995]) +* introduce the JSON Processor {pull}20128[#20128] (issue: {issue}20052[#20052]) + +Internal:: +* Add cross cluster support to `_field_caps` {pull}24463[#24463] (issue: {issue}24334[#24334]) +* Log JVM arguments on startup {pull}24451[#24451] +* Preserve cluster alias throughout search execution to lookup nodes by cluster and ID {pull}24438[#24438] +* Move RemoteClusterService into TransportService {pull}24424[#24424] +* Enum related performance additions. 
{pull}24274[#24274] (issue: {issue}24226[#24226]) +* Add a dedicated TransportRemoteInfoAction for consistency {pull}24040[#24040] (issue: {issue}23969[#23969]) +* Simplify sorted top docs merging in SearchPhaseController {pull}23881[#23881] +* Synchronized CollapseTopFieldDocs with lucenes relatives {pull}23854[#23854] +* Cleanup SearchPhaseController interface {pull}23844[#23844] +* Do not create String instances in 'Strings' methods accepting StringBuilder {pull}22907[#22907] +* Improve connection closing in `RemoteClusterConnection` {pull}22804[#22804] (issue: {issue}22803[#22803]) +* Remove some more usages of ParseFieldMatcher {pull}22437[#22437] (issues: {issue}19552[#19552], {issue}22130[#22130]) +* Remove some more usages of ParseFieldMatcher {pull}22398[#22398] (issues: {issue}19552[#19552], {issue}22130[#22130]) +* Remove some more usages of ParseFieldMatcher {pull}22395[#22395] (issues: {issue}19552[#19552], {issue}22130[#22130]) +* Remove some ParseFieldMatcher usages {pull}22389[#22389] (issues: {issue}19552[#19552], {issue}22130[#22130]) +* Introduce ToXContentObject interface {pull}22387[#22387] (issue: {issue}16347[#16347]) +* Add infrastructure to manage network connections outside of Transport/TransportService {pull}22194[#22194] +* Replace strict parsing mode with response headers assertions {pull}22130[#22130] (issues: {issue}11859[#11859], {issue}19552[#19552], {issue}20993[#20993]) +* Start using `ObjectParser` for aggs. {pull}22048[#22048] (issue: {issue}22009[#22009]) +* Don't output null source node in RecoveryFailedException {pull}21963[#21963] +* ClusterService should expose "applied" cluster states (i.e., remove ClusterStateStatus) {pull}21817[#21817] +* Rename ClusterState#lookupPrototypeSafe to `lookupPrototype` and remove "unsafe" unused variant {pull}21686[#21686] +* ShardActiveResponseHandler shouldn't hold to an entire cluster state {pull}21470[#21470] (issue: {issue}21394[#21394]) +* Remove unused ClusterService dependency from SearchPhaseController {pull}21421[#21421] +* Remove special case in case no action filters are registered {pull}21251[#21251] +* Use TimeValue instead of long for CacheBuilder methods {pull}20887[#20887] +* Remove SearchContext#current and all its threadlocals {pull}20778[#20778] (issue: {issue}19341[#19341]) +* Remove poor-mans compression in InternalSearchHit and friends {pull}20472[#20472] + +Java API:: +* Added types options to DeleteByQueryRequest {pull}23265[#23265] (issue: {issue}21984[#21984]) +* prevent NPE when trying to uncompress a null BytesReference {pull}22386[#22386] + +Java High Level REST Client:: +* Add utility method to parse named XContent objects with typed prefix {pull}24240[#24240] (issue: {issue}22965[#22965]) +* Convert suggestion response parsing to use NamedXContentRegistry {pull}23355[#23355] +* UpdateRequest implements ToXContent {pull}23289[#23289] +* Add javadoc for DocWriteResponse.Builders {pull}23267[#23267] +* Expose WriteRequest.RefreshPolicy string representation {pull}23106[#23106] +* Use `typed_keys` parameter to prefix suggester names by type in search responses {pull}23080[#23080] (issue: {issue}22965[#22965]) +* Add parsing from xContent to MainResponse {pull}22934[#22934] +* Parse elasticsearch exception's root causes {pull}22924[#22924] +* Add parsing method to BytesRestResponse's error {pull}22873[#22873] +* Add parsing methods to BulkItemResponse {pull}22859[#22859] +* Add parsing method for ElasticsearchException.generateFailureXContent() {pull}22815[#22815] +* Add parsing method 
for ElasticsearchException.generateThrowableXContent() {pull}22783[#22783] +* Add parsing methods for UpdateResponse {pull}22586[#22586] +* Add parsing from xContent to InternalSearchHit and InternalSearchHits {pull}22429[#22429] +* Add fromxcontent methods to index response {pull}22229[#22229] +* Add fromXContent() methods for ReplicationResponse {pull}22196[#22196] (issue: {issue}22082[#22082]) +* Add parsing method for ElasticsearchException {pull}22143[#22143] +* Add fromXContent method to GetResponse {pull}22082[#22082] + +Java REST Client:: +* move ignore parameter support from yaml test client to low level rest client {pull}22637[#22637] +* Warn log deprecation warnings received from server {pull}21895[#21895] +* Support Preemptive Authentication with RestClient {pull}21336[#21336] +* Provide error message when rest request path is null {pull}21233[#21233] (issue: {issue}21232[#21232]) + +Logging:: +* Log deleting indices at info level {pull}22627[#22627] (issue: {issue}22605[#22605]) +* Expose logs base path {pull}22625[#22625] +* Log failure to connect to node at info instead of debug {pull}21809[#21809] (issue: {issue}6468[#6468]) +* Truncate log messages from the end {pull}21609[#21609] (issue: {issue}21602[#21602]) +* Ensure logging is initialized in CLI tools {pull}20575[#20575] +* Give useful error message if log config is missing {pull}20493[#20493] +* Complete Elasticsearch logger names {pull}20457[#20457] (issue: {issue}20326[#20326]) +* Logging shutdown hack {pull}20389[#20389] (issue: {issue}20304[#20304]) +* Disable console logging {pull}20387[#20387] +* Warn on not enough masters during election {pull}20063[#20063] (issue: {issue}8362[#8362]) + +Mapping:: +* Do not index `_type` when there is at most one type. {pull}24363[#24363] +* Only allow one type on 6.0 indices {pull}24317[#24317] (issue: {issue}15613[#15613]) +* token_count type : add an option to count tokens (fix #23227) {pull}24175[#24175] (issue: {issue}23227[#23227]) +* Atomic mapping updates across types {pull}22220[#22220] +* Only update DocumentMapper if field type changes {pull}22165[#22165] +* Better error message when _parent isn't an object {pull}21987[#21987] +* Create the QueryShardContext lazily in DocumentMapperParser. {pull}21287[#21287] + +Nested Docs:: +* Avoid adding unnecessary nested filters when ranges are used. 
{pull}23427[#23427] + +Network:: +* Set available processors for Netty {pull}24420[#24420] (issue: {issue}6224[#6224]) +* Adjust default Netty receive predictor size to 64k {pull}23542[#23542] (issue: {issue}23185[#23185]) +* Keep the pipeline handler queue small initially {pull}23335[#23335] +* Set network receive predictor size to 32kb {pull}23284[#23284] (issue: {issue}23185[#23185]) +* TransportService.connectToNode should validate remote node ID {pull}22828[#22828] (issue: {issue}22194[#22194]) +* Disable the Netty recycler {pull}22452[#22452] (issues: {issue}22189[#22189], {issue}22360[#22360], {issue}22406[#22406], {issue}5904[#5904]) +* Tell Netty not to be unsafe in transport client {pull}22284[#22284] +* Introduce a low level protocol handshake {pull}22094[#22094] +* Detach handshake from connect to node {pull}22037[#22037] +* Reduce number of connections per node depending on the nodes role {pull}21849[#21849] +* Add a connect timeout to the ConnectionProfile to allow per node connect timeouts {pull}21847[#21847] (issue: {issue}19719[#19719]) +* Grant Netty permission to read system somaxconn {pull}21840[#21840] +* Remove connectToNodeLight and replace it with a connection profile {pull}21799[#21799] +* Lazy resolve unicast hosts {pull}21630[#21630] (issues: {issue}14441[#14441], {issue}16412[#16412]) +* Fix handler name on message not fully read {pull}21478[#21478] +* Handle rejected pings on shutdown gracefully {pull}20842[#20842] +* Network: Allow to listen on virtual interfaces. {pull}19568[#19568] (issues: {issue}17473[#17473], {issue}19537[#19537]) + +Packaging:: +* Introduce Java version check {pull}23194[#23194] (issue: {issue}21102[#21102]) +* Improve the out-of-the-box experience {pull}21920[#21920] (issues: {issue}18317[#18317], {issue}21783[#21783]) +* Add empty plugins dir for archive distributions {pull}21204[#21204] (issue: {issue}20342[#20342]) +* Make explicit missing settings for Windows service {pull}21200[#21200] (issue: {issue}18317[#18317]) +* Change permissions on config files {pull}20966[#20966] +* Add quiet option to disable console logging {pull}20422[#20422] (issues: {issue}15315[#15315], {issue}16159[#16159], {issue}17220[#17220]) + +Percolator:: +* Allowing range queries with now ranges inside percolator queries {pull}23921[#23921] (issue: {issue}23859[#23859]) +* Add term extraction support for MultiPhraseQuery {pull}23176[#23176] + +Plugin Discovery EC2:: +* Settings: Migrate ec2 discovery sensitive settings to elasticsearch keystore {pull}23961[#23961] (issue: {issue}22475[#22475]) +* Add support for ca-central-1 region to EC2 and S3 plugins {pull}22458[#22458] (issue: {issue}22454[#22454]) +* Support for eu-west-2 (London) cloud-aws plugin {pull}22308[#22308] (issue: {issue}22306[#22306]) +* Add us-east-2 AWS region {pull}21961[#21961] (issue: {issue}21881[#21881]) +* Add setting to set read timeout for EC2 discovery and S3 repository plugins {pull}21956[#21956] (issue: {issue}19078[#19078]) + +Plugin Ingest GeoIp:: +* Cache results of geoip lookups {pull}22231[#22231] (issue: {issue}22074[#22074]) + +Plugin Lang Painless:: +* Allow painless to load stored fields {pull}24290[#24290] +* Start on custom whitelists for Painless {pull}23563[#23563] +* Fix Painless's implementation of interfaces returning primitives {pull}23298[#23298] (issue: {issue}22983[#22983]) +* Allow painless to implement more interfaces {pull}22983[#22983] +* Generate reference links for painless API {pull}22775[#22775] +* Painless: Add augmentation to String for base 64 
{pull}22665[#22665] (issue: {issue}22648[#22648]) +* Improve painless's ScriptException generation {pull}21762[#21762] (issue: {issue}21733[#21733]) +* Add Debug.explain to painless {pull}21723[#21723] (issue: {issue}20263[#20263]) +* Implement the ?: operator in painless {pull}21506[#21506] +* In painless suggest a long constant if int won't do {pull}21415[#21415] (issue: {issue}21313[#21313]) +* Support decimal constants with trailing [dD] in painless {pull}21412[#21412] (issue: {issue}21116[#21116]) +* Implement reading from null safe dereferences {pull}21239[#21239] +* Painless negative offsets {pull}21080[#21080] (issue: {issue}20870[#20870]) +* Remove more equivalents of the now method from the Painless whitelist. {pull}21047[#21047] +* Disable regexes by default in painless {pull}20427[#20427] (issue: {issue}20397[#20397]) + +Plugin Repository Azure:: +* Add Backoff policy to azure repository {pull}23387[#23387] (issue: {issue}22728[#22728]) + +Plugin Repository S3:: +* Removes the retry mechanism from the S3 blob store {pull}23952[#23952] (issue: {issue}22845[#22845]) +* S3 Repository: Eagerly load static settings {pull}23910[#23910] +* S3 repository: Add named configurations {pull}22762[#22762] (issues: {issue}22479[#22479], {issue}22520[#22520]) +* Make the default S3 buffer size depend on the available memory. {pull}21299[#21299] + +Plugins:: +* Plugins: Add support for platform specific plugins {pull}24265[#24265] +* Plugins: Remove leniency for missing plugins dir {pull}24173[#24173] +* Modify permissions dialog for plugins {pull}23742[#23742] +* Plugins: Add plugin cli specific exit codes {pull}23599[#23599] (issue: {issue}15295[#15295]) +* Plugins: Output better error message when existing plugin is incompatible {pull}23562[#23562] (issue: {issue}20691[#20691]) +* Add the ability to define search response listeners in search plugin {pull}22682[#22682] +* Pass ThreadContext to transport interceptors to allow header modification {pull}22618[#22618] (issue: {issue}22585[#22585]) +* Provide helpful error message if a plugin exists {pull}22305[#22305] (issue: {issue}22084[#22084]) +* Add shutdown hook for closing CLI commands {pull}22126[#22126] (issue: {issue}22111[#22111]) +* Allow plugins to install bootstrap checks {pull}22110[#22110] +* Clarify that plugins can be closed {pull}21669[#21669] +* Plugins: Convert custom discovery to pull based plugin {pull}21398[#21398] +* Removing plugin that isn't installed shouldn't trigger usage information {pull}21272[#21272] (issue: {issue}21250[#21250]) +* Remove pluggability of ZenPing {pull}21049[#21049] +* Make UnicastHostsProvider extension pull based {pull}21036[#21036] +* Revert "Display plugins versions" {pull}20807[#20807] (issues: {issue}18683[#18683], {issue}20668[#20668]) +* Provide error message when plugin id is missing {pull}20660[#20660] + +Query DSL:: +* Make it possible to validate a query on all shards instead of a single random shard {pull}23697[#23697] (issue: {issue}18254[#18254]) +* QueryString and SimpleQueryString Graph Support {pull}22541[#22541] +* Additional Graph Support in Match Query {pull}22503[#22503] (issue: {issue}22490[#22490]) +* RangeQuery WITHIN case now normalises query {pull}22431[#22431] (issue: {issue}22412[#22412]) +* Un-deprecate fuzzy query {pull}22088[#22088] (issue: {issue}15760[#15760]) +* support numeric bounds with decimal parts for long/integer/short/byte datatypes {pull}21972[#21972] (issue: {issue}21600[#21600]) +* Using ObjectParser in MatchAllQueryBuilder and IdsQueryBuilder 
{pull}21273[#21273] +* Expose splitOnWhitespace in `Query String Query` {pull}20965[#20965] (issue: {issue}20841[#20841]) +* Throw error if query element doesn't end with END_OBJECT {pull}20528[#20528] (issue: {issue}20515[#20515]) +* Remove `lowercase_expanded_terms` and `locale` from query-parser options. {pull}20208[#20208] (issue: {issue}9978[#9978]) + +REST:: +* Allow passing single scrollID in clear scroll API body {pull}24242[#24242] (issue: {issue}24233[#24233]) +* Validate top-level keys when parsing mget requests {pull}23746[#23746] (issue: {issue}23720[#23720]) +* Cluster stats should not render empty http/transport types {pull}23735[#23735] +* Add parameter to prefix aggs name with type in search responses {pull}22965[#22965] +* Add a REST spec for the create API {pull}20924[#20924] +* Add response params to REST params did you mean {pull}20753[#20753] (issues: {issue}20722[#20722], {issue}20747[#20747]) +* Add did you mean to strict REST params {pull}20747[#20747] (issue: {issue}20722[#20722]) + +Reindex API:: +* Increase visibility of doExecute so it can be used directly {pull}22614[#22614] +* Improve error message when reindex-from-remote gets bad json {pull}22536[#22536] (issue: {issue}22330[#22330]) +* Reindex: Better error message for pipeline in wrong place {pull}21985[#21985] +* Timeout improvements for rest client and reindex {pull}21741[#21741] (issue: {issue}21707[#21707]) +* Add "simple match" support for reindex-from-remote whitelist {pull}21004[#21004] +* Make reindex-from-remote ignore unknown fields {pull}20591[#20591] (issue: {issue}20504[#20504]) + +Scripting:: +* Expose multi-valued dates to scripts and document painless's date functions {pull}22875[#22875] (issue: {issue}22162[#22162]) +* Wrap VerifyError in ScriptException {pull}21769[#21769] +* Log ScriptException's xcontent if file script compilation fails {pull}21767[#21767] (issue: {issue}21733[#21733]) +* Support binary field type in script values {pull}21484[#21484] (issue: {issue}14469[#14469]) +* Mustache: Add {{#url}}{{/url}} function to URL encode strings {pull}20838[#20838] +* Expose `ctx._now` in update scripts {pull}20835[#20835] (issue: {issue}17895[#17895]) + +Search:: +* Remove leniency when merging fetched hits in a search response phase {pull}24158[#24158] +* Set shard count limit to unlimited {pull}24012[#24012] +* Streamline shard index availability in all SearchPhaseResults {pull}23788[#23788] +* Search took time should use a relative clock {pull}23662[#23662] +* Prevent negative `from` parameter in SearchSourceBuilder {pull}23358[#23358] (issue: {issue}23324[#23324]) +* Remove unnecessary result sorting in SearchPhaseController {pull}23321[#23321] +* Expose `batched_reduce_size` via `_search` {pull}23288[#23288] (issue: {issue}23253[#23253]) +* Adding fromXContent to Suggest and Suggestion class {pull}23226[#23226] (issue: {issue}23202[#23202]) +* Adding fromXContent to Suggestion.Entry and subclasses {pull}23202[#23202] +* Add CollapseSearchPhase as a successor for the FetchSearchPhase {pull}23165[#23165] +* Integrate IndexOrDocValuesQuery. {pull}23119[#23119] +* Detach SearchPhases from AbstractSearchAsyncAction {pull}23118[#23118] +* Fix GraphQuery expectation after Lucene upgrade to 6.5 {pull}23117[#23117] (issue: {issue}23102[#23102]) +* Nested queries should avoid adding unnecessary filters when possible. 
{pull}23079[#23079] (issue: {issue}20797[#20797]) +* Add xcontent parsing to completion suggestion option {pull}23071[#23071] +* Add xcontent parsing to suggestion options {pull}23018[#23018] +* Separate reduce (aggs, suggest and profile) from merging fetched hits {pull}23017[#23017] +* Add a setting to disable remote cluster connections on a node {pull}23005[#23005] +* First step towards separating individual search phases {pull}22802[#22802] +* Add parsing from xContent to SearchProfileShardResults and nested classes {pull}22649[#22649] +* Move SearchTransportService and SearchPhaseController creation outside of TransportSearchAction constructor {pull}21754[#21754] +* Don't carry ShardRouting around when not needed in AbstractSearchAsyncAction {pull}21753[#21753] +* ShardSearchRequest to take ShardId constructor argument rather than the whole ShardRouting {pull}21750[#21750] +* Use index uuid as key in the alias filter map rather than the index name {pull}21749[#21749] +* Add indices and filter information to search shards api output {pull}21738[#21738] (issue: {issue}20916[#20916]) +* remove pointless catch exception in TransportSearchAction {pull}21689[#21689] +* Optimize query with types filter in the URL (t/t/_search) {pull}20979[#20979] +* Makes search action cancelable by task management API {pull}20405[#20405] + +Search Templates:: +* Add profile and explain parameters to template API {pull}20451[#20451] + +Settings:: +* Add secure file setting to keystore {pull}24001[#24001] +* Add a property to mark setting as final {pull}23872[#23872] +* Remove obsolete index setting `index.version.minimum_compatible`. {pull}23593[#23593] +* Provide a method to retrieve a closeable char[] from a SecureString {pull}23389[#23389] +* Update indices settings api to support CBOR and SMILE format {pull}23309[#23309] (issues: {issue}23242[#23242], {issue}23245[#23245]) +* Improve setting deprecation message {pull}23156[#23156] (issue: {issue}22849[#22849]) +* Add secure settings validation on startup {pull}22894[#22894] +* Allow comma delimited array settings to have a space after each entry {pull}22591[#22591] (issue: {issue}22297[#22297]) +* Allow affix settings to be dynamic / updatable {pull}22526[#22526] +* Allow affix settings to delegate to actual settings {pull}22523[#22523] +* Make s3 repository sensitive settings use secure settings {pull}22479[#22479] +* Speed up filter and prefix settings operations {pull}22249[#22249] +* Add precise logging on unknown or invalid settings {pull}20951[#20951] (issue: {issue}20946[#20946]) + +Snapshot/Restore:: +* Ensure every repository has an incompatible-snapshots blob {pull}24403[#24403] (issue: {issue}22267[#22267]) +* Change snapshot status error to use generic SnapshotException {pull}24355[#24355] (issue: {issue}24225[#24225]) +* Duplicate snapshot name throws InvalidSnapshotNameException {pull}22921[#22921] (issue: {issue}18228[#18228]) +* Fixes retrieval of the latest snapshot index blob {pull}22700[#22700] +* Use general cluster state batching mechanism for snapshot state updates {pull}22528[#22528] (issue: {issue}14899[#14899]) +* Synchronize snapshot deletions on the cluster state {pull}22313[#22313] (issue: {issue}19957[#19957]) +* Abort snapshots on a node that leaves the cluster {pull}21084[#21084] (issue: {issue}20876[#20876]) + +Stats:: +* Show JVM arguments {pull}24450[#24450] +* Add cross-cluster search remote cluster info API {pull}23969[#23969] (issue: {issue}23925[#23925]) +* Add geo_point to FieldStats {pull}21947[#21947] (issue: 
{issue}20707[#20707]) +* Include unindexed field in FieldStats response {pull}21821[#21821] (issue: {issue}21952[#21952]) +* Remove load average leniency {pull}21380[#21380] +* Strengthen handling of unavailable cgroup stats {pull}21094[#21094] (issue: {issue}21029[#21029]) +* Add basic cgroup CPU metrics {pull}21029[#21029] + +Suggesters:: +* Provide informative error message in case of unknown suggestion context. {pull}24241[#24241] +* Allow different data types for category in Context suggester {pull}23491[#23491] (issue: {issue}22358[#22358]) + +Task Manager:: +* Limit IndexRequest toString() length {pull}22832[#22832] +* Improve the error message if task and node isn't found {pull}22062[#22062] (issue: {issue}22027[#22027]) +* Add descriptions to create snapshot and restore snapshot tasks. {pull}21901[#21901] (issue: {issue}21768[#21768]) +* Add proper descriptions to reindex, update-by-query and delete-by-query tasks. {pull}21841[#21841] (issue: {issue}21768[#21768]) +* Add search task descriptions {pull}21740[#21740] + +Tribe Node:: +* Add support for merging custom meta data in tribe node {pull}21552[#21552] (issues: {issue}20544[#20544], {issue}20791[#20791], {issue}9372[#9372]) + +Upgrade API:: +* Allow plugins to upgrade templates and index metadata on startup {pull}24379[#24379] + +[float] +=== Bug Fixes + + +Aggregations:: +* InternalPercentilesBucket should not rely on ordered percents array {pull}24336[#24336] (issue: {issue}24331[#24331]) +* Align behavior HDR percentiles iterator with percentile() method {pull}24206[#24206] +* The `filter` and `significant_terms` aggregations should parse the `filter` as a filter, not a query. {pull}23797[#23797] +* Completion suggestion should also consider text if prefix/regex is missing {pull}23451[#23451] (issue: {issue}23340[#23340]) +* Fixes the per term error in the terms aggregation {pull}23399[#23399] +* Fixes terms error count for multiple reduce phases {pull}23291[#23291] (issue: {issue}23286[#23286]) +* Fix scaled_float numeric type in aggregations {pull}22351[#22351] (issue: {issue}22350[#22350]) +* Allow terms aggregations on pure boolean scripts. {pull}22201[#22201] (issue: {issue}20941[#20941]) +* Fix numeric terms aggregations with includes/excludes and minDocCount=0 {pull}22141[#22141] (issue: {issue}22140[#22140]) +* Fix `missing` on aggs on `boolean` fields. {pull}22135[#22135] (issue: {issue}22009[#22009]) +* IP range masks exclude the maximum address of the range. {pull}22018[#22018] (issue: {issue}22005[#22005]) +* Fix `other_bucket` on the `filters` agg to be enabled if a key is set. {pull}21994[#21994] (issue: {issue}21951[#21951]) +* Rewrite Queries/Filter in FilterAggregationBuilder and ensure client usage marks query as non-cachable {pull}21303[#21303] (issue: {issue}21301[#21301]) +* Percentiles bucket fails for 100th percentile {pull}21218[#21218] +* Thread safety for scripted significance heuristics {pull}21113[#21113] (issue: {issue}18120[#18120]) +* `ip_range` aggregation should accept null bounds. {pull}21043[#21043] (issue: {issue}21006[#21006]) +* Fixes bug preventing script sort working on top_hits aggregation {pull}21023[#21023] (issue: {issue}21022[#21022]) +* Fixed writeable name from range to geo_distance {pull}20860[#20860] +* Fix date_range aggregation to not cache if now is used {pull}20740[#20740] +* The `top_hits` aggregation should compile scripts only once. 
{pull}20738[#20738] + +Allocation:: +* Discard stale node responses from async shard fetching {pull}24434[#24434] (issue: {issue}24007[#24007]) +* Cannot force allocate primary to a node where the shard already exists {pull}22031[#22031] (issue: {issue}22021[#22021]) +* Promote shadow replica to primary when initializing primary fails {pull}22021[#22021] +* Trim in-sync allocations set only when it grows {pull}21976[#21976] (issue: {issue}21719[#21719]) +* Allow master to assign primary shard to node that has shard store locked during shard state fetching {pull}21656[#21656] (issue: {issue}19416[#19416]) +* Keep a shadow replicas' allocation id when it is promoted to primary {pull}20863[#20863] (issue: {issue}20650[#20650]) +* IndicesClusterStateService should clean local started when re-assigns an initializing shard with the same aid {pull}20687[#20687] +* IndexRoutingTable.initializeEmpty shouldn't override supplied primary RecoverySource {pull}20638[#20638] (issue: {issue}20637[#20637]) +* Update incoming recoveries stats when shadow replica is reinitialized {pull}20612[#20612] +* `index.routing.allocation.initial_recovery` limits replica allocation {pull}20589[#20589] + +Analysis:: +* AsciiFoldingFilter's multi-term component should never preserve the original token. {pull}21982[#21982] +* Pre-built analysis factories do not implement MultiTermAware correctly. {pull}21981[#21981] +* Can load non-PreBuiltTokenFilter in Analyze API {pull}20396[#20396] +* Named analyzer should close the analyzer that it wraps {pull}20197[#20197] + +Bulk:: +* Reject empty IDs {pull}24118[#24118] (issue: {issue}24116[#24116]) + +CAT API:: +* Consume `full_id` request parameter early {pull}21270[#21270] (issue: {issue}21266[#21266]) + +CRUD:: +* Reject external versioning and explicit version numbers on create {pull}21998[#21998] +* MultiGet should not fail entirely if alias resolves to many indices {pull}20858[#20858] (issue: {issue}20845[#20845]) +* Fixed date math expression support in multi get requests. {pull}20659[#20659] (issue: {issue}17957[#17957]) + +Cache:: +* Invalidate cached query results if query timed out {pull}22807[#22807] (issue: {issue}22789[#22789]) +* Fix the request cache keys to not hold references to the SearchContext. 
{pull}21284[#21284] +* Prevent requests that use scripts or now() from being cached {pull}20750[#20750] (issue: {issue}20645[#20645]) + +Circuit Breakers:: +* ClusterState publishing shouldn't trigger circuit breakers {pull}20986[#20986] (issues: {issue}20827[#20827], {issue}20960[#20960]) + +Cluster:: +* Don't set local node on cluster state used for node join validation {pull}23311[#23311] (issues: {issue}21830[#21830], {issue}3[#3], {issue}4[#4], {issue}6[#6], {issue}9[#9]) +* Allow a cluster state applier to create an observer and wait for a better state {pull}23132[#23132] (issue: {issue}21817[#21817]) +* Cluster allocation explain to never return empty response body {pull}23054[#23054] +* IndicesService handles all exceptions during index deletion {pull}22433[#22433] +* Remove cluster update task when task times out {pull}21578[#21578] (issue: {issue}21568[#21568]) + +Core:: +* Check for default.path.data included in path.data {pull}24285[#24285] (issue: {issue}24283[#24283]) +* Improve performance of extracting warning value {pull}24114[#24114] (issue: {issue}24018[#24018]) +* Reject duplicate settings on the command line {pull}24053[#24053] +* Restrict build info loading to ES jar, not any jar {pull}24049[#24049] (issue: {issue}21955[#21955]) +* Streamline foreign stored context restore and allow to preserve response headers {pull}22677[#22677] (issue: {issue}22647[#22647]) +* Support negative numbers in readVLong {pull}22314[#22314] +* Add a StreamInput#readArraySize method that ensures sane array sizes {pull}21697[#21697] +* Use a buffer to do character to byte conversion in StreamOutput#writeString {pull}21680[#21680] (issue: {issue}21660[#21660]) +* Fix ShardInfo#toString {pull}21319[#21319] +* Protect BytesStreamOutput against overflows of the current number of written bytes. {pull}21174[#21174] (issue: {issue}21159[#21159]) +* Return target index name even if _rollover conditions are not met {pull}21138[#21138] +* .es_temp_file remains after system crash, causing it not to start again {pull}21007[#21007] (issue: {issue}20992[#20992]) +* StoreStatsCache should also ignore AccessDeniedException when checking file size {pull}20790[#20790] (issue: {issue}17580[#17580]) + +Dates:: +* Fix time zone rounding edge case for DST overlaps {pull}21550[#21550] (issue: {issue}20833[#20833]) + +Discovery:: +* ZenDiscovery - only validate min_master_nodes values if local node is master {pull}23915[#23915] (issue: {issue}23695[#23695]) +* Close InputStream when receiving cluster state in PublishClusterStateAction {pull}22711[#22711] +* Do not reply to pings from another cluster {pull}21894[#21894] (issue: {issue}21874[#21874]) +* Add current cluster state version to zen pings and use them in master election {pull}20384[#20384] (issue: {issue}20348[#20348]) + +Engine:: +* Close and flush refresh listeners on shard close {pull}22342[#22342] +* Die with dignity on the Lucene layer {pull}21721[#21721] (issue: {issue}19272[#19272]) +* Fix `InternalEngine#isThrottled` to not always return `false`. {pull}21592[#21592] +* Retrying replication requests on replica doesn't call `onRetry` {pull}21189[#21189] (issue: {issue}20211[#20211]) +* Take refresh IOExceptions into account when catching ACE in InternalEngine {pull}20546[#20546] (issue: {issue}19975[#19975]) + +Exceptions:: +* Stop returning "es." 
internal exception headers as http response headers {pull}22703[#22703] (issue: {issue}17593[#17593]) +* Fixing shard recovery error message to report the number of docs correctly for each node {pull}22515[#22515] (issue: {issue}21893[#21893]) + +Highlighting:: +* Fix FiltersFunctionScoreQuery highlighting {pull}21827[#21827] +* Fix highlighting on a stored keyword field {pull}21645[#21645] (issue: {issue}21636[#21636]) +* Fix highlighting of MultiTermQuery within a FunctionScoreQuery {pull}20400[#20400] (issue: {issue}20392[#20392]) + +Index APIs:: +* Fixes restore of a shrunken index when initial recovery node is gone {pull}24322[#24322] (issue: {issue}24257[#24257]) +* Honor update request timeout {pull}23825[#23825] +* Ensure shrunk indices carry over version information from its source {pull}22469[#22469] (issue: {issue}22373[#22373]) +* Validate the `_rollover` target index name early to also fail if dry_run=true {pull}21330[#21330] (issue: {issue}21149[#21149]) +* Only negate index expression on all indices with preceding wildcard {pull}20898[#20898] (issues: {issue}19800[#19800], {issue}20033[#20033]) +* Fix IndexNotFoundException in multi index search request. {pull}20188[#20188] (issue: {issue}3839[#3839]) + +Index Templates:: +* Fix integer overflows when dealing with templates. {pull}21628[#21628] (issue: {issue}21622[#21622]) + +Ingest:: +* Improve missing ingest processor error {pull}23379[#23379] (issue: {issue}23392[#23392]) +* update _ingest.timestamp to use new ZonedDateTime {pull}23174[#23174] (issue: {issue}23168[#23168]) +* fix date-processor to a new default year for every new pipeline execution {pull}22601[#22601] (issue: {issue}22547[#22547]) +* fix index out of bounds error in KV Processor {pull}22288[#22288] (issue: {issue}22272[#22272]) +* Fixes GrokProcessor's ignorance of named-captures with same name. {pull}22131[#22131] (issue: {issue}22117[#22117]) +* fix trace_match behavior for when there is only one grok pattern {pull}21413[#21413] (issue: {issue}21371[#21371]) +* Stored scripts and ingest node configurations should be included into a snapshot {pull}21227[#21227] (issue: {issue}21184[#21184]) +* make painless the default scripting language for ScriptProcessor {pull}20981[#20981] (issue: {issue}20943[#20943]) +* no null values in ingest configuration error messages {pull}20616[#20616] +* JSON Processor was not properly added {pull}20613[#20613] + +Inner Hits:: +* Replace NestedChildrenQuery with ParentChildrenBlockJoinQuery {pull}24016[#24016] (issue: {issue}24009[#24009]) +* Changed DisMaxQueryBuilder to extract inner hits from leaf queries {pull}23512[#23512] (issue: {issue}23482[#23482]) +* Inner hits and ignore unmapped {pull}21693[#21693] (issue: {issue}21620[#21620]) +* Skip adding a parent field to nested documents. 
{pull}21522[#21522] (issue: {issue}21503[#21503]) + +Internal:: +* Fix NPE if field caps request has a field that exists not in all indices {pull}24504[#24504] +* Add infrastructure to mark contexts as system contexts {pull}23830[#23830] +* Always restore the ThreadContext for operations delayed due to a block {pull}23349[#23349] +* Index creation and setting update may not return deprecation logging {pull}22702[#22702] +* Rethrow ExecutionException from the loader to concurrent callers of Cache#computeIfAbsent {pull}21549[#21549] +* Restore thread's original context before returning to the ThreadPool {pull}21411[#21411] +* Fix NPE in SearchContext.toString() {pull}21069[#21069] +* Prevent AbstractArrays from releasing bytes more than once {pull}20819[#20819] +* Source filtering should treat dots in field names as sub objects. {pull}20736[#20736] (issue: {issue}20719[#20719]) +* IndicesAliasesRequest should not implement CompositeIndicesRequest {pull}20726[#20726] +* Ensure elasticsearch doesn't start with unsupported indices {pull}20514[#20514] (issue: {issue}20512[#20512]) + +Java API:: +* Don't output empty ext object in SearchSourceBuilder#toXContent {pull}22093[#22093] (issue: {issue}20969[#20969]) +* Transport client: Fix remove address to actually work {pull}21743[#21743] +* Add a HostFailureListener to notify client code if a node got disconnected {pull}21709[#21709] (issue: {issue}21424[#21424]) +* Fix InternalSearchHit#hasSource to return the proper boolean value {pull}21441[#21441] (issue: {issue}21419[#21419]) +* Null checked for source when calling sourceRef {pull}21431[#21431] (issue: {issue}19279[#19279]) +* ClusterAdminClient.prepareDeletePipeline method should accept pipeline id to delete {pull}21228[#21228] +* fix IndexResponse#toString to print out shards info {pull}20562[#20562] + +Java High Level REST Client:: +* Correctly parse BulkItemResponse.Failure's status {pull}23432[#23432] + +Java REST Client:: +* Make buffer limit configurable in HeapBufferedConsumerFactory {pull}23970[#23970] (issue: {issue}23958[#23958]) +* RestClient asynchronous execution should not throw exceptions {pull}23307[#23307] +* Don't use null charset in RequestLogger {pull}22197[#22197] (issue: {issue}22190[#22190]) +* Rest client: don't reuse the same HttpAsyncResponseConsumer across multiple retries {pull}21378[#21378] + +Logging:: +* Do not prematurely shutdown Log4j {pull}21519[#21519] (issue: {issue}21514[#21514]) +* Assert status logger does not warn on Log4j usage {pull}21339[#21339] +* Fix logger names for Netty {pull}21223[#21223] (issue: {issue}20457[#20457]) +* Fix logger when you can not create an azure storage client {pull}20670[#20670] (issues: {issue}20633[#20633], {issue}20669[#20669]) +* Avoid unnecessary creation of prefix loggers {pull}20571[#20571] (issue: {issue}20570[#20570]) +* Fix logging hierarchy configs {pull}20463[#20463] +* Fix prefix logging {pull}20429[#20429] + +Mapping:: +* Preserve response headers when creating an index {pull}23950[#23950] (issue: {issue}23947[#23947]) +* Improves disabled fielddata error message {pull}23841[#23841] (issue: {issue}22768[#22768]) +* Fix MapperService StackOverflowError {pull}23605[#23605] (issue: {issue}23604[#23604]) +* Fix NPE with scaled floats stats when field is not indexed {pull}23528[#23528] (issue: {issue}23487[#23487]) +* Range types causing `GetFieldMappingsIndexRequest` to fail due to `NullPointerException` in `RangeFieldMapper.doXContentBody` when `include_defaults=true` is on the query string {pull}22925[#22925] +* 
Disallow introducing illegal object mappings (double '..') {pull}22891[#22891] (issue: {issue}22794[#22794]) +* The `_all` default mapper is not completely configured. {pull}22236[#22236] +* Fix MapperService.allEnabled(). {pull}22227[#22227] +* Dynamic `date` fields should use the `format` that was used to detect it is a date. {pull}22174[#22174] (issue: {issue}9410[#9410]) +* Sub-fields should not accept `include_in_all` parameter {pull}21971[#21971] (issue: {issue}21710[#21710]) +* Mappings: Fix get mapping when no indexes exist to not fail in response generation {pull}21924[#21924] (issue: {issue}21916[#21916]) +* Fail to index fields with dots in field names when one of the intermediate objects is nested. {pull}21787[#21787] (issue: {issue}21726[#21726]) +* Uncommitted mapping updates should not affect existing indices {pull}21306[#21306] (issue: {issue}21189[#21189]) + +Nested Docs:: +* Fix bug in query builder rewrite that ignores the ignore_unmapped option {pull}22456[#22456] + +Network:: +* Respect promises on pipelined responses {pull}23317[#23317] (issues: {issue}23310[#23310], {issue}23322[#23322]) +* Ensure that releasing listener is called {pull}23310[#23310] +* Pass `forceExecution` flag to transport interceptor {pull}22739[#22739] +* Ensure new connections won't be opened if transport is closed or closing {pull}22589[#22589] (issue: {issue}22554[#22554]) +* Prevent open channel leaks if handshake times out or is interrupted {pull}22554[#22554] +* Execute low level handshake in #openConnection {pull}22440[#22440] +* Handle connection close / reset events gracefully during handshake {pull}22178[#22178] +* Do not lose host information when pinging {pull}21939[#21939] (issue: {issue}21828[#21828]) +* DiscoveryNode and TransportAddress should preserve host information {pull}21828[#21828] +* Die with dignity on the network layer {pull}21720[#21720] (issue: {issue}19272[#19272]) +* Fix connection close header handling {pull}20956[#20956] (issue: {issue}20938[#20938]) +* Ensure port range is readable in the exception message {pull}20893[#20893] +* Prevent double release in TcpTransport if send listener throws an exception {pull}20880[#20880] + +Packaging:: +* Fall back to non-atomic move when removing plugins {pull}23548[#23548] (issue: {issue}35[#35]) +* Another fix for handling of paths on Windows {pull}22132[#22132] (issue: {issue}21921[#21921]) +* Fix handling of spaces in Windows paths {pull}21921[#21921] (issues: {issue}20809[#20809], {issue}21525[#21525]) +* Add option to skip kernel parameters on install {pull}21899[#21899] (issue: {issue}21877[#21877]) +* Set vm.max_map_count on systemd package install {pull}21507[#21507] +* Export ES_JVM_OPTIONS for SysV init {pull}21445[#21445] (issue: {issue}21255[#21255]) +* Debian: configure start-stop-daemon to not go into background {pull}21343[#21343] (issues: {issue}12716[#12716], {issue}21300[#21300]) +* Generate POM files with non-wildcard excludes {pull}21234[#21234] (issue: {issue}21170[#21170]) +* [Packaging] Do not remove scripts directory on upgrade {pull}20452[#20452] +* [Package] Remove bin/lib/modules directories on RPM uninstall/upgrade {pull}20448[#20448] + +Parent/Child:: +* Add null check in case of orphan child document {pull}22772[#22772] (issue: {issue}22770[#22770]) + +Percolator:: +* Fix memory leak when percolator uses bitset or field data cache {pull}24115[#24115] (issue: {issue}24108[#24108]) +* Fix NPE in percolator's 'now' range check for percolator queries with range queries {pull}22356[#22356] (issue: 
{issue}22355[#22355]) + +Plugin Analysis Stempel:: +* Fix thread safety of Stempel's token filter factory {pull}22610[#22610] (issue: {issue}21911[#21911]) + +Plugin Discovery EC2:: +* Fix ec2 discovery when used with IAM profiles. {pull}21048[#21048] (issue: {issue}21039[#21039]) + +Plugin Ingest GeoIp:: +* [ingest-geoip] update geoip to not include null-valued results from {pull}20455[#20455] + +Plugin Lang Painless:: +* painless: Fix method references to ctor with the new LambdaBootstrap and cleanup code {pull}24406[#24406] +* Fix Painless Lambdas for Java 9 {pull}24070[#24070] (issue: {issue}23473[#23473]) +* Fix painless's regex lexer and error messages {pull}23634[#23634] +* Replace Painless's Cast with casting strategies {pull}23369[#23369] +* Fix Bad Casts In Painless {pull}23282[#23282] (issue: {issue}23238[#23238]) +* Don't allow casting from void to def in painless {pull}22969[#22969] (issue: {issue}22908[#22908]) +* Fix def invoked qualified method refs {pull}22918[#22918] +* Whitelist some ScriptDocValues in painless {pull}22600[#22600] (issue: {issue}22584[#22584]) +* Update Painless Loop Counter to be Higher {pull}22560[#22560] (issue: {issue}22508[#22508]) +* Fix some issues with painless's strings {pull}22393[#22393] (issue: {issue}22372[#22372]) +* Test fix for def equals in Painless {pull}21945[#21945] (issue: {issue}21801[#21801]) +* Fix a VerifyError bug in Painless {pull}21765[#21765] +* Fix Lambdas in Painless to be Able to Use Top-Level Variables Such as params and doc {pull}21635[#21635] (issues: {issue}20869[#20869], {issue}21479[#21479]) +* Fix String Concatenation Bug In Painless {pull}20623[#20623] + +Plugin Repository Azure:: +* Azure blob store's readBlob() method first checks if the blob exists {pull}23483[#23483] (issue: {issue}23480[#23480]) +* Fixes default chunk size for Azure repositories {pull}22577[#22577] (issue: {issue}22513[#22513]) +* readonly on azure repository must be taken into account {pull}22055[#22055] (issues: {issue}22007[#22007], {issue}22053[#22053]) + +Plugin Repository HDFS:: +* Fixing permission errors for `KERBEROS` security mode for HDFS Repository {pull}23439[#23439] (issue: {issue}22156[#22156]) + +Plugin Repository S3:: +* Handle BlobPath's trailing separator case. 
Add test cases to BlobPathTests.java {pull}23091[#23091] +* Fixes leading forward slash in S3 repository base_path {pull}20861[#20861] + +Plugins:: +* Fix delete of plugin directory on remove plugin {pull}24266[#24266] (issue: {issue}24252[#24252]) +* Use a marker file when removing a plugin {pull}24252[#24252] (issue: {issue}24231[#24231]) +* Remove hidden file leniency from plugin service {pull}23982[#23982] (issue: {issue}12465[#12465]) +* Add check for null pluginName in remove command {pull}22930[#22930] (issue: {issue}22922[#22922]) +* Use sysprop like with es.path.home to pass conf dir {pull}18870[#18870] (issue: {issue}18689[#18689]) + +Query DSL:: +* FuzzyQueryBuilder should error when parsing array of values {pull}23762[#23762] (issue: {issue}23759[#23759]) +* Fix parsing for `max_determinized_states` {pull}22749[#22749] (issue: {issue}22722[#22722]) +* Fix script score function that combines _score and weight {pull}22713[#22713] (issue: {issue}21483[#21483]) +* Fixes date range query using epoch with timezone {pull}21542[#21542] (issue: {issue}21501[#21501]) +* Allow overriding all-field leniency when `lenient` option is specified {pull}21504[#21504] (issues: {issue}20925[#20925], {issue}21341[#21341]) +* Max score should be updated when a rescorer is used {pull}20977[#20977] (issue: {issue}20651[#20651]) +* Fixes MultiMatchQuery so that it doesn't provide a null context {pull}20882[#20882] +* Fix silently accepting malformed queries {pull}20515[#20515] (issue: {issue}20500[#20500]) +* Fix match_phrase_prefix query with single term on _all field {pull}20471[#20471] (issue: {issue}20470[#20470]) + +REST:: +* [API] change wait_for_completion default according to docs {pull}23672[#23672] +* Deprecate request_cache for clear-cache {pull}23638[#23638] (issue: {issue}22748[#22748]) +* HTTP transport stashes the ThreadContext instead of the RestController {pull}23456[#23456] +* Fix date format in warning headers {pull}23418[#23418] (issue: {issue}23275[#23275]) +* Align REST specs for HEAD requests {pull}23313[#23313] (issue: {issue}21125[#21125]) +* Correct warning header to be compliant {pull}23275[#23275] (issue: {issue}22986[#22986]) +* Fix get HEAD requests {pull}23186[#23186] (issue: {issue}21125[#21125]) +* Handle bad HTTP requests {pull}23153[#23153] (issue: {issue}23034[#23034]) +* Fix get source HEAD requests {pull}23151[#23151] (issue: {issue}21125[#21125]) +* Properly encode location header {pull}23133[#23133] (issues: {issue}21057[#21057], {issue}23115[#23115]) +* Fix template HEAD requests {pull}23130[#23130] (issue: {issue}21125[#21125]) +* Fix index HEAD requests {pull}23112[#23112] (issue: {issue}21125[#21125]) +* Fix alias HEAD requests {pull}23094[#23094] (issue: {issue}21125[#21125]) +* Strict level parsing for indices stats {pull}21577[#21577] (issue: {issue}21024[#21024]) +* The routing query string param is supported by mget but was missing from the rest spec {pull}21357[#21357] +* fix thread_pool_patterns path variable definition {pull}21332[#21332] +* Read indices options in indices upgrade API {pull}21281[#21281] (issue: {issue}21099[#21099]) +* ensure the XContentBuilder is always closed in RestBuilderListener {pull}21124[#21124] +* Add correct Content-Length on HEAD requests {pull}21123[#21123] (issue: {issue}21077[#21077]) +* Make sure HEAD / has 0 Content-Length {pull}21077[#21077] (issue: {issue}21075[#21075]) +* Adds percent-encoding for Location headers {pull}21057[#21057] (issue: {issue}21016[#21016]) +* Whitelist node stats indices level parameter 
{pull}21024[#21024] (issue: {issue}20722[#20722]) +* Remove lenient URL parameter parsing {pull}20722[#20722] (issue: {issue}14719[#14719]) +* XContentBuilder: Avoid building self-referencing objects {pull}20550[#20550] (issues: {issue}19475[#19475], {issue}20540[#20540]) + +Recovery:: +* Provide target allocation id as part of start recovery request {pull}24333[#24333] (issue: {issue}24167[#24167]) +* Fix primary relocation for shadow replicas {pull}22474[#22474] (issue: {issue}20300[#20300]) +* Don't close store under CancellableThreads {pull}22434[#22434] (issue: {issue}22325[#22325]) +* Use a fresh recovery id when retrying recoveries {pull}22325[#22325] (issue: {issue}22043[#22043]) +* Allow flush/force_merge/upgrade on shard marked as relocated {pull}22078[#22078] (issue: {issue}22043[#22043]) +* Fix concurrency issues between cancelling a relocation and marking shard as relocated {pull}20443[#20443] + +Reindex API:: +* Fix throttled reindex_from_remote {pull}23953[#23953] (issues: {issue}23828[#23828], {issue}23945[#23945]) +* Fix reindex with a remote source on a version before 2.0.0 {pull}23805[#23805] +* Make reindex wait for cleanup before responding {pull}23677[#23677] (issue: {issue}23653[#23653]) +* Reindex: do not log when can't clear old scroll {pull}22942[#22942] (issue: {issue}22937[#22937]) +* Fix reindex-from-remote from <2.0 {pull}22931[#22931] (issue: {issue}22893[#22893]) +* Fix reindex from remote clearing scroll {pull}22525[#22525] (issue: {issue}22514[#22514]) +* Fix source filtering in reindex-from-remote {pull}22514[#22514] (issue: {issue}22507[#22507]) +* Remove content type detection from reindex-from-remote {pull}22504[#22504] (issue: {issue}22329[#22329]) +* Don't close rest client from its callback {pull}22061[#22061] (issue: {issue}22027[#22027]) +* Keep context during reindex's retries {pull}21941[#21941] +* Ignore IllegalArgumentException with assertVersionSerializable {pull}21409[#21409] (issues: {issue}20767[#20767], {issue}21350[#21350]) +* Bump reindex-from-remote's buffer to 200mb {pull}21222[#21222] (issue: {issue}21185[#21185]) +* Fix reindex-from-remote for parent/child from <2.0 {pull}21070[#21070] (issue: {issue}21044[#21044]) + +Scripting:: +* Convert script/template objects to json format internally {pull}23308[#23308] (issue: {issue}23245[#23245]) +* Script: Fix value of `ctx._now` to be current epoch time in milliseconds {pull}23175[#23175] (issue: {issue}23169[#23169]) +* Expose `ip` fields as strings in scripts. {pull}21997[#21997] (issue: {issue}21977[#21977]) +* Add support for booleans in scripts {pull}20950[#20950] (issue: {issue}20949[#20949]) +* Native scripts should be created once per index, not per segment. {pull}20609[#20609] + +Search:: +* Include all aliases including non-filtering in `_search_shards` response {pull}24489[#24489] +* Cross Cluster Search: propagate original indices per cluster {pull}24328[#24328] +* Query string default field {pull}24214[#24214] +* Speed up parsing of large `terms` queries. {pull}24210[#24210] +* IndicesQueryCache should delegate the scorerSupplier method. 
{pull}24209[#24209] +* Disable graph analysis at query time for shingle and cjk filters producing tokens of different size {pull}23920[#23920] (issue: {issue}23918[#23918]) +* Fix cross-cluster remote node gateway attributes {pull}23863[#23863] +* Use a fixed seed for computing term hashCode in TermsSliceQuery {pull}23795[#23795] +* Honor max concurrent searches in multi-search {pull}23538[#23538] (issue: {issue}23527[#23527]) +* Avoid stack overflow in multi-search {pull}23527[#23527] (issue: {issue}23523[#23523]) +* Fix query_string_query to transform "foo:*" in an exists query on the field name {pull}23433[#23433] (issue: {issue}23356[#23356]) +* Factor out filling of TopDocs in SearchPhaseController {pull}23380[#23380] (issues: {issue}19356[#19356], {issue}23357[#23357]) +* Replace blocking calls in ExpandCollapseSearchResponseListener by asynchronous requests {pull}23053[#23053] (issue: {issue}23048[#23048]) +* Ensure fixed serialization order of InnerHitBuilder {pull}22820[#22820] (issue: {issue}22808[#22808]) +* Improve concurrency of ShardCoreKeyMap. {pull}22316[#22316] +* Make `-0` compare less than `+0` consistently. {pull}22173[#22173] (issue: {issue}22167[#22167]) +* Fix boost_mode propagation when the function score query builder is rewritten {pull}22172[#22172] (issue: {issue}22138[#22138]) +* FiltersAggregationBuilder: rewriting filter queries, the same way as in FilterAggregationBuilder {pull}22076[#22076] +* Fix cross_fields type on multi_match query with synonyms {pull}21638[#21638] (issue: {issue}21633[#21633]) +* Fix match_phrase_prefix on boosted fields {pull}21623[#21623] (issue: {issue}21613[#21613]) +* Respect default search timeout {pull}21599[#21599] (issues: {issue}12211[#12211], {issue}21595[#21595]) +* Remove LateParsingQuery to prevent timestamp access after context is frozen {pull}21328[#21328] (issue: {issue}21295[#21295]) +* Make range queries round up upper bounds again. 
{pull}20582[#20582] (issues: {issue}20579[#20579], {issue}8889[#8889]) +* Throw error when trying to fetch fields from source and source is disabled {pull}20424[#20424] (issues: {issue}20093[#20093], {issue}20408[#20408]) + +Search Templates:: +* No longer add illegal content type option to stored search templates {pull}24251[#24251] (issue: {issue}24227[#24227]) +* SearchTemplateRequest to implement CompositeIndicesRequest {pull}21865[#21865] (issue: {issue}21747[#21747]) + +Settings:: +* Do not set path.data in environment if not set {pull}24132[#24132] (issue: {issue}24099[#24099]) +* Correct handling of default and array settings {pull}24074[#24074] (issues: {issue}23981[#23981], {issue}24052[#24052]) +* Fix merge scheduler config settings {pull}23391[#23391] +* Settings: Fix keystore cli prompting for yes/no to handle console returning null {pull}23320[#23320] +* Expose `search.highlight.term_vector_multi_value` as a node level setting {pull}22999[#22999] +* NPE when no setting name passed to elasticsearch-keystore {pull}22609[#22609] +* Handle spaces in `action.auto_create_index` gracefully {pull}21790[#21790] (issue: {issue}21449[#21449]) +* Fix settings diff generation for affix and group settings {pull}21788[#21788] +* Don't reset non-dynamic settings unless explicitly requested {pull}21646[#21646] (issue: {issue}21593[#21593]) +* Fix Setting.timeValue() method {pull}20696[#20696] (issue: {issue}20662[#20662]) +* Add a hard limit for `index.number_of_shard` {pull}20682[#20682] +* Include complex settings in settings requests {pull}20622[#20622] + +Snapshot/Restore:: +* Fixes maintaining the shards a snapshot is waiting on {pull}24289[#24289] +* Fixes snapshot status on failed snapshots {pull}23833[#23833] (issue: {issue}23716[#23716]) +* Fixes snapshot deletion handling on in-progress snapshot failure {pull}23703[#23703] (issue: {issue}23663[#23663]) +* Prioritize listing index-N blobs over index.latest in reading snapshots {pull}23333[#23333] +* Gracefully handles pre 2.x compressed snapshots {pull}22267[#22267] +* URLRepository should throw NoSuchFileException to correctly adhere to readBlob contract {pull}22069[#22069] (issue: {issue}22004[#22004]) +* Fixes shard level snapshot metadata loading when index-N file is missing {pull}21813[#21813] +* Ensures cleanup of temporary index-* generational blobs during snapshotting {pull}21469[#21469] (issue: {issue}21462[#21462]) +* Fixes get snapshot duplicates when asking for _all {pull}21340[#21340] (issue: {issue}21335[#21335]) + +Stats:: +* Avoid overflow when computing total FS stats {pull}23641[#23641] +* Handle existence of cgroup version 2 hierarchy {pull}23493[#23493] (issue: {issue}23486[#23486]) +* Handle long overflow when adding paths' totals {pull}23293[#23293] (issue: {issue}23093[#23093]) +* Fix control group pattern {pull}23219[#23219] (issue: {issue}23218[#23218]) +* Fix total disk bytes returning negative value {pull}23093[#23093] +* Implement stats for geo_point and geo_shape field {pull}22391[#22391] (issue: {issue}22384[#22384]) +* Use reader for doc stats {pull}22317[#22317] (issue: {issue}22285[#22285]) +* Avoid NPE in NodeService#stats if HTTP is disabled {pull}22060[#22060] (issue: {issue}22058[#22058]) +* Add support for "include_segment_file_sizes" in indices stats REST handler {pull}21879[#21879] (issue: {issue}21878[#21878]) +* Remove output_uuid parameter from cluster stats {pull}21020[#21020] (issue: {issue}20722[#20722]) +* Fix FieldStats deserialization of `ip` field {pull}20522[#20522] (issue: 
{issue}20516[#20516]) + +Task Manager:: +* Task Management: Make TaskInfo parsing forwards compatible {pull}24073[#24073] (issue: {issue}23250[#23250]) +* Fix hanging cancelling task with no children {pull}22796[#22796] +* Fix broken TaskInfo.toString() {pull}22698[#22698] (issue: {issue}22387[#22387]) +* Task cancellation command should wait for all child nodes to receive cancellation request before returning {pull}21397[#21397] (issue: {issue}21126[#21126]) + +Term Vectors:: +* Fix _termvectors with preference to not hit NPE {pull}21959[#21959] +* Return correct term statistics when a field is not found in a shard {pull}21922[#21922] (issue: {issue}21906[#21906]) + +Tribe Node:: +* Add socket permissions for tribe nodes {pull}21546[#21546] (issues: {issue}16392[#16392], {issue}21122[#21122]) + +[float] +=== Regressions + +Bulk:: +* Fix _bulk response when it can't create an index {pull}24048[#24048] (issues: {issue}22488[#22488], {issue}24028[#24028]) + +Core:: +* Source filtering: only accept array items if the previous include pattern matches {pull}22593[#22593] (issue: {issue}22557[#22557]) + +Highlighting:: +* Handle SynonymQuery extraction for the FastVectorHighlighter {pull}20829[#20829] (issue: {issue}20781[#20781]) + +Logging:: +* Restores the original default format of search slow log {pull}21770[#21770] (issue: {issue}21711[#21711]) + +Network:: +* You had one job Netty logging guard {pull}24469[#24469] (issues: {issue}5624[#5624], {issue}6568[#6568]) + +Plugin Discovery EC2:: +* Fix ec2 discovery when used with IAM profiles. {pull}21042[#21042] (issue: {issue}21039[#21039]) + +Plugin Repository S3:: +* Fix s3 repository when used with IAM profiles {pull}21058[#21058] (issue: {issue}21048[#21048]) + +Plugins:: +* Plugins: Add back user agent when downloading plugins {pull}20872[#20872] + +Search:: +* Handle specialized term queries in MappedFieldType.extractTerm(Query) {pull}21889[#21889] (issue: {issue}21882[#21882]) + +//[float] +//=== Known Issues + +[float] +=== Upgrades + +Aggregations:: +* Upgrade HDRHistogram to 2.1.9 {pull}23254[#23254] (issue: {issue}23239[#23239]) + +Core:: +* Upgrade to Lucene 6.5.0 {pull}23750[#23750] +* Upgrade from JNA 4.2.2 to JNA 4.4.0 {pull}23636[#23636] +* Upgrade to lucene-6.5.0-snapshot-d00c5ca {pull}23385[#23385] +* Upgrade to lucene-6.5.0-snapshot-f919485. {pull}23087[#23087] +* Upgrade to Lucene 6.4.0 {pull}22724[#22724] +* Update Jackson to 2.8.6 {pull}22596[#22596] (issue: {issue}22266[#22266]) +* Upgrade to lucene-6.4.0-snapshot-084f7a0. {pull}22413[#22413] +* Upgrade to lucene-6.4.0-snapshot-ec38570 {pull}21853[#21853] +* Upgrade to lucene-6.3.0. {pull}21464[#21464] + +Dates:: +* Update Joda Time to version 2.9.5 {pull}21468[#21468] (issues: {issue}20911[#20911], {issue}332[#332], {issue}373[#373], {issue}378[#378], {issue}379[#379], {issue}386[#386], {issue}394[#394], {issue}396[#396], {issue}397[#397], {issue}404[#404], {issue}69[#69]) + +Internal:: +* Upgrade to Lucene 6.4.1. 
{pull}22978[#22978] -=== Known Issues +Logging:: +* Upgrade to Log4j 2.8.2 {pull}23995[#23995] +* Upgrade Log4j 2 to version 2.7 {pull}20805[#20805] (issue: {issue}20304[#20304]) +Network:: +* Upgrade Netty to 4.1.10.Final {pull}24414[#24414] +* Upgrade to Netty 4.1.9 {pull}23540[#23540] (issues: {issue}23172[#23172], {issue}6308[#6308], {issue}6374[#6374]) +* Upgrade to Netty 4.1.8 {pull}23055[#23055] +* Upgrade to Netty 4.1.7 {pull}22587[#22587] +* Upgrade to Netty 4.1.6 {pull}21051[#21051] +Plugin Repository Azure:: +* Update to Azure Storage 5.0.0 {pull}23517[#23517] (issue: {issue}23448[#23448]) diff --git a/docs/reference/cluster/reroute.asciidoc b/docs/reference/cluster/reroute.asciidoc index 0bc8610e0c792..f076a7b83585a 100644 --- a/docs/reference/cluster/reroute.asciidoc +++ b/docs/reference/cluster/reroute.asciidoc @@ -1,13 +1,12 @@ [[cluster-reroute]] == Cluster Reroute -The reroute command allows to explicitly execute a cluster reroute -allocation command including specific commands. For example, a shard can -be moved from one node to another explicitly, an allocation can be -canceled, or an unassigned shard can be explicitly allocated on a -specific node. +The reroute command allows for manual changes to the allocation of individual +shards in the cluster. For example, a shard can be moved from one node to +another explicitly, an allocation can be cancelled, and an unassigned shard can +be explicitly allocated to a specific node. -Here is a short example of how a simple reroute API call: +Here is a short example of a simple reroute API call: [source,js] -------------------------------------------------- @@ -32,59 +31,53 @@ POST /_cluster/reroute // CONSOLE // TEST[skip:doc tests run with only a single node] -An important aspect to remember is the fact that once when an allocation -occurs, the cluster will aim at re-balancing its state back to an even -state. For example, if the allocation includes moving a shard from -`node1` to `node2`, in an `even` state, then another shard will be moved -from `node2` to `node1` to even things out. +It is important to note that after processing any reroute commands +Elasticsearch will perform rebalancing as normal (respecting the values of +settings such as `cluster.routing.rebalance.enable`) in order to remain in a +balanced state. For example, if the requested allocation includes moving a +shard from `node1` to `node2` then this may cause a shard to be moved from +`node2` back to `node1` to even things out. -The cluster can be set to disable allocations, which means that only the -explicitly allocations will be performed. Obviously, only once all -commands has been applied, the cluster will aim to be re-balance its -state. +The cluster can be set to disable allocations using the +`cluster.routing.allocation.enable` setting. If allocations are disabled then +the only allocations that will be performed are explicit ones given using the +`reroute` command, and consequent allocations due to rebalancing. -Another option is to run the commands in `dry_run` (as a URI flag, or in -the request body). This will cause the commands to apply to the current -cluster state, and return the resulting cluster after the commands (and -re-balancing) has been applied. +It is possible to run `reroute` commands in "dry run" mode by using the +`?dry_run` URI query parameter, or by passing `"dry_run": true` in the request +body. 
This will calculate the result of applying the commands to the current +cluster state, and return the resulting cluster state after the commands (and +re-balancing) have been applied, but will not actually perform the requested +changes. -If the `explain` parameter is specified, a detailed explanation of why the -commands could or could not be executed is returned. +If the `?explain` URI query parameter is included then a detailed explanation +of why the commands could or could not be executed is included in the response. The commands supported are: `move`:: Move a started shard from one node to another node. Accepts `index` and `shard` for index name and shard number, `from_node` for the - node to move the shard `from`, and `to_node` for the node to move the + node to move the shard from, and `to_node` for the node to move the shard to. `cancel`:: - Cancel allocation of a shard (or recovery). Accepts `index` - and `shard` for index name and shard number, and `node` for the node to - cancel the shard allocation on. It also accepts `allow_primary` flag to - explicitly specify that it is allowed to cancel allocation for a primary - shard. This can be used to force resynchronization of existing replicas - from the primary shard by cancelling them and allowing them to be - reinitialized through the standard reallocation process. + Cancel allocation of a shard (or recovery). Accepts `index` and `shard` for + index name and shard number, and `node` for the node to cancel the shard + allocation on. This can be used to force resynchronization of existing + replicas from the primary shard by cancelling them and allowing them to be + reinitialized through the standard recovery process. By default only + replica shard allocations can be cancelled. If it is necessary to cancel + the allocation of a primary shard then the `allow_primary` flag must also + be included in the request. `allocate_replica`:: - Allocate an unassigned replica shard to a node. Accepts the - `index` and `shard` for index name and shard number, and `node` to - allocate the shard to. Takes <> into account. - -Two more commands are available that allow the allocation of a primary shard -to a node. These commands should however be used with extreme care, as primary -shard allocation is usually fully automatically handled by Elasticsearch. -Reasons why a primary shard cannot be automatically allocated include the following: - -- A new index was created but there is no node which satisfies the allocation deciders. -- An up-to-date shard copy of the data cannot be found on the current data nodes in -the cluster. To prevent data loss, the system does not automatically promote a stale -shard copy to primary. + Allocate an unassigned replica shard to a node. Accepts `index` and `shard` + for index name and shard number, and `node` to allocate the shard to. Takes + <> into account. [float] -=== Retry failed shards +=== Retrying failed allocations The cluster will attempt to allocate a shard a maximum of `index.allocation.max_retries` times in a row (defaults to `5`), before giving @@ -93,36 +86,48 @@ structural problems such as having an analyzer which refers to a stopwords file which doesn't exist on all nodes. Once the problem has been corrected, allocation can be manually retried by -calling the <> API with `?retry_failed`, which -will attempt a single retry round for these shards. +calling the <> API with the `?retry_failed` URI +query parameter, which will attempt a single retry round for these shards. 
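+
+A single retry round for all of these shards can be requested with a plain
+reroute call carrying the `?retry_failed` parameter and no commands in the
+body; a minimal sketch:
+
+[source,js]
+--------------------------------------------------
+POST /_cluster/reroute?retry_failed=true
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:doc tests run with only a single node]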
[float] === Forced allocation on unrecoverable errors +Two more commands are available that allow the allocation of a primary shard to +a node. These commands should however be used with extreme care, as primary +shard allocation is usually fully automatically handled by Elasticsearch. +Reasons why a primary shard cannot be automatically allocated include the +following: + +- A new index was created but there is no node which satisfies the allocation + deciders. +- An up-to-date shard copy of the data cannot be found on the current data + nodes in the cluster. To prevent data loss, the system does not automatically +promote a stale shard copy to primary. + The following two commands are dangerous and may result in data loss. They are -meant to be used in cases where the original data can not be recovered and the cluster -administrator accepts the loss. If you have suffered a temporary issue that has been -fixed, please see the `retry_failed` flag described above. +meant to be used in cases where the original data can not be recovered and the +cluster administrator accepts the loss. If you have suffered a temporary issue +that can be fixed, please see the `retry_failed` flag described above. To +emphasise: if these commands are performed and then a node joins the cluster +that holds a copy of the affected shard then the copy on the newly-joined node +will be deleted or overwritten. `allocate_stale_primary`:: Allocate a primary shard to a node that holds a stale copy. Accepts the - `index` and `shard` for index name and shard number, and `node` to - allocate the shard to. Using this command may lead to data loss - for the provided shard id. If a node which has the good copy of the - data rejoins the cluster later on, that data will be overwritten with - the data of the stale copy that was forcefully allocated with this - command. To ensure that these implications are well-understood, - this command requires the special field `accept_data_loss` to be - explicitly set to `true` for it to work. + `index` and `shard` for index name and shard number, and `node` to allocate + the shard to. Using this command may lead to data loss for the provided + shard id. If a node which has the good copy of the data rejoins the cluster + later on, that data will be deleted or overwritten with the data of the + stale copy that was forcefully allocated with this command. To ensure that + these implications are well-understood, this command requires the flag + `accept_data_loss` to be explicitly set to `true`. `allocate_empty_primary`:: - Allocate an empty primary shard to a node. Accepts the - `index` and `shard` for index name and shard number, and `node` to - allocate the shard to. Using this command leads to a complete loss - of all data that was indexed into this shard, if it was previously - started. If a node which has a copy of the - data rejoins the cluster later on, that data will be deleted! - To ensure that these implications are well-understood, - this command requires the special field `accept_data_loss` to be - explicitly set to `true` for it to work. + Allocate an empty primary shard to a node. Accepts the `index` and `shard` + for index name and shard number, and `node` to allocate the shard to. Using + this command leads to a complete loss of all data that was indexed into + this shard, if it was previously started. If a node which has a copy of the + data rejoins the cluster later on, that data will be deleted. 
To ensure + that these implications are well-understood, this command requires the flag + `accept_data_loss` to be explicitly set to `true`. diff --git a/docs/reference/cluster/state.asciidoc b/docs/reference/cluster/state.asciidoc index d0ff3290c74d3..a20ff04d83f4a 100644 --- a/docs/reference/cluster/state.asciidoc +++ b/docs/reference/cluster/state.asciidoc @@ -15,6 +15,12 @@ of the cluster state (its size when serialized for transmission over the network), and the cluster state itself, which can be filtered to only retrieve the parts of interest, as described below. +The cluster's `cluster_uuid` is also returned as part of the top-level +response, in addition to the `metadata` section. added[6.4.0] + +NOTE: While the cluster is still forming, it is possible for the `cluster_uuid` + to be `_na_` as well as the cluster state's version to be `-1`. + By default, the cluster state request is routed to the master node, to ensure that the latest cluster state is returned. For debugging purposes, you can retrieve the cluster state local to a diff --git a/docs/reference/docs/delete-by-query.asciidoc b/docs/reference/docs/delete-by-query.asciidoc index db2ff3eeb6e3c..4c78f55f41f93 100644 --- a/docs/reference/docs/delete-by-query.asciidoc +++ b/docs/reference/docs/delete-by-query.asciidoc @@ -284,9 +284,12 @@ executed again in order to conform to `requests_per_second`. `failures`:: -Array of all indexing failures. If this is non-empty then the request aborted -because of those failures. See `conflicts` for how to prevent version conflicts -from aborting the operation. +Array of failures if there were any unrecoverable errors during the process. If +this is non-empty then the request aborted because of those failures. +Delete-by-query is implemented using batches and any failure causes the entire +process to abort but all failures in the current batch are collected into the +array. You can use the `conflicts` option to prevent delete-by-query from aborting on +version conflicts. [float] diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index 5f34371ab8467..e8283abfc2ef0 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -161,12 +161,12 @@ POST _reindex `index` and `type` in `source` can both be lists, allowing you to copy from lots of sources in one request. This will copy documents from the `_doc` and -`post` types in the `twitter` and `blog` index. The copied documents would include the -`post` type in the `twitter` index and the `_doc` type in the `blog` index. For more +`post` types in the `twitter` and `blog` indices. The copied documents would include the +`post` type in the `twitter` index and the `_doc` type in the `blog` index. For more specific parameters, you can use `query`. -The Reindex API makes no effort to handle ID collisions. For such issues, the target index -will remain valid, but it's not easy to predict which document will survive because +The Reindex API makes no effort to handle ID collisions. For such issues, the target index +will remain valid, but it's not easy to predict which document will survive because the iteration order isn't well defined. [source,js] @@ -666,9 +666,11 @@ executed again in order to conform to `requests_per_second`. `failures`:: -Array of all indexing failures. If this is non-empty then the request aborted -because of those failures. See `conflicts` for how to prevent version conflicts -from aborting the operation. 
+Array of failures if there were any unrecoverable errors during the process. If +this is non-empty then the request aborted because of those failures. Reindex +is implemented using batches, and any failure causes the entire process to abort, +but all failures in the current batch are collected into the array. You can use +the `conflicts` option to prevent reindex from aborting on version conflicts. [float] [[docs-reindex-task-api]] @@ -1004,7 +1006,7 @@ number for most indices. If slicing manually or otherwise tuning automatic slicing, use these guidelines. Query performance is most efficient when the number of `slices` is equal to the -number of shards in the index. If that number is large (e.g. 500), +number of shards in the index. If that number is large (e.g. 500), choose a lower number as too many `slices` will hurt performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead. @@ -1018,7 +1020,7 @@ documents being reindexed and cluster resources. [float] === Reindex daily indices -You can use `_reindex` in combination with <<indices-templates,index templates>> +You can use `_reindex` in combination with <<indices-templates,index templates>> to reindex daily indices to apply a new template to the existing documents. Assuming you have indices consisting of documents as follows: diff --git a/docs/reference/docs/update-by-query.asciidoc b/docs/reference/docs/update-by-query.asciidoc index 527a007c5f1a9..7d3dc48e3c102 100644 --- a/docs/reference/docs/update-by-query.asciidoc +++ b/docs/reference/docs/update-by-query.asciidoc @@ -338,9 +338,13 @@ executed again in order to conform to `requests_per_second`. `failures`:: -Array of all indexing failures. If this is non-empty then the request aborted -because of those failures. See `conflicts` for how to prevent version conflicts -from aborting the operation. +Array of failures if there were any unrecoverable errors during the process. If +this is non-empty then the request aborted because of those failures. +Update-by-query is implemented using batches, and any failure causes the entire +process to abort, but all failures in the current batch are collected into the +array. You can use the `conflicts` option to prevent update-by-query from +aborting on version conflicts. + [float] diff --git a/docs/reference/index-modules/merge.asciidoc b/docs/reference/index-modules/merge.asciidoc index 97db09ba656c7..cc0613ec2870d 100644 --- a/docs/reference/index-modules/merge.asciidoc +++ b/docs/reference/index-modules/merge.asciidoc @@ -23,7 +23,8 @@ The merge scheduler supports the following _dynamic_ setting: `index.merge.scheduler.max_thread_count`:: - The maximum number of threads that may be merging at once. Defaults to + The maximum number of threads on a single shard that may be merging at once. + Defaults to `Math.max(1, Math.min(4, Runtime.getRuntime().availableProcessors() / 2))` which works well for a good solid-state-disk (SSD). If your index is on spinning platter drives instead, decrease this to 1.
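As a minimal sketch of how this dynamic setting might be lowered on a live index through the update index settings API (the index name `my_index` here is an assumed placeholder, not taken from the hunk above):

[source,js]
--------------------------------------------------
// "my_index" is a hypothetical index name; the setting is dynamic,
// so it can be changed without closing or reopening the index.
PUT /my_index/_settings
{
  "index.merge.scheduler.max_thread_count": 1
}
--------------------------------------------------

A value of `1` follows the spinning-platter guidance above; SSD-backed indices can usually keep the default.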
diff --git a/docs/reference/index-shared4.asciidoc b/docs/reference/index-shared4.asciidoc index 3d807dd98d39c..5e6ebc8a5a20c 100644 --- a/docs/reference/index-shared4.asciidoc +++ b/docs/reference/index-shared4.asciidoc @@ -5,4 +5,4 @@ include::testing.asciidoc[] include::glossary.asciidoc[] -include::release-notes.asciidoc[] +include::{docdir}/../CHANGELOG.asciidoc[] \ No newline at end of file diff --git a/docs/reference/indices/shrink-index.asciidoc b/docs/reference/indices/shrink-index.asciidoc index 027cf8b924d36..81d79c47472df 100644 --- a/docs/reference/indices/shrink-index.asciidoc +++ b/docs/reference/indices/shrink-index.asciidoc @@ -119,9 +119,15 @@ POST my_source_index/_shrink/my_target_index segment. -NOTE: Mappings may not be specified in the `_shrink` request, and all -`index.analysis.*` and `index.similarity.*` settings will be overwritten with -the settings from the source index. +NOTE: Mappings may not be specified in the `_shrink` request. + +NOTE: By default, with the exception of `index.analysis`, `index.similarity`, +and `index.sort` settings, index settings on the source index are not copied +during a shrink operation. Settings from the source index, other than +non-copyable settings, can be copied to the target index by adding the URL +parameter `copy_settings=true` to the request. + +deprecated[6.4.0, `copy_settings` will default to `true` in 8.x and will be removed in 9.0.0] [float] === Monitoring the shrink process diff --git a/docs/reference/indices/split-index.asciidoc b/docs/reference/indices/split-index.asciidoc index f9f40401e11b7..1f5c0df9484ce 100644 --- a/docs/reference/indices/split-index.asciidoc +++ b/docs/reference/indices/split-index.asciidoc @@ -169,9 +169,15 @@ POST my_source_index/_split/my_target_index number of shards in the source index. -NOTE: Mappings may not be specified in the `_split` request, and all -`index.analysis.*` and `index.similarity.*` settings will be overwritten with -the settings from the source index. +NOTE: Mappings may not be specified in the `_split` request. + +NOTE: By default, with the exception of `index.analysis`, `index.similarity`, +and `index.sort` settings, index settings on the source index are not copied +during a split operation. Settings from the source index, other than +non-copyable settings, can be copied to the target index by adding the URL +parameter `copy_settings=true` to the request. + +deprecated[6.4.0, `copy_settings` will default to `true` in 8.x and will be removed in 9.0.0] [float] === Monitoring the split process diff --git a/docs/reference/migration/migrate_6_4.asciidoc b/docs/reference/migration/migrate_6_4.asciidoc index a761509597fd2..a0ba52412751a 100644 --- a/docs/reference/migration/migrate_6_4.asciidoc +++ b/docs/reference/migration/migrate_6_4.asciidoc @@ -10,3 +10,20 @@ In the past, `fields` could be provided either as a parameter, or as part of the body. Specifying `fields` in the request body is now deprecated, and instead they should always be supplied through a request parameter. In 7.0.0, the field capabilities API will not accept `fields` supplied in the request body. + +[[copy-source-settings-on-resize]] +==== Copying source settings during shrink/split operations + +In prior versions of Elasticsearch, resize operations (shrink/split) would only +copy `index.analysis`, `index.similarity`, and `index.sort` settings from the +source index.
Elasticsearch 6.4.0 introduces a request parameter `copy_settings` +which will copy all index settings from the source except for non-copyable index +settings. This parameter defaults to `false` in 6.x, is immediately deprecated +in 6.4.0, will only be able to be set to `true` in 8.x, and will be removed in +9.0.0. Note that when this parameter is used, all copyable settings will be +copied; this includes the index blocks that must be put in place for a +resize operation, and any allocation settings put in place in preparation for +executing the resize operation. If you use this parameter, you will either have +to follow up the operation with a request to adjust the settings on the target +index to the desired values, or send the desired values of these settings with +the resize operation. diff --git a/docs/reference/modules/cluster/disk_allocator.asciidoc b/docs/reference/modules/cluster/disk_allocator.asciidoc index 0f43d9fcd30c9..d93453a49e8ed 100644 --- a/docs/reference/modules/cluster/disk_allocator.asciidoc +++ b/docs/reference/modules/cluster/disk_allocator.asciidoc @@ -1,9 +1,9 @@ [[disk-allocator]] === Disk-based Shard Allocation -Elasticsearch factors in the available disk space on a node before deciding -whether to allocate new shards to that node or to actively relocate shards -away from that node. +Elasticsearch considers the available disk space on a node before deciding +whether to allocate new shards to that node or to actively relocate shards away +from that node. Below are the settings that can be configured in the `elasticsearch.yml` config file or updated dynamically on a live cluster with the @@ -15,29 +15,33 @@ file or updated dynamically on a live cluster with the `cluster.routing.allocation.disk.watermark.low`:: - Controls the low watermark for disk usage. It defaults to 85%, meaning ES will - not allocate new shards to nodes once they have more than 85% disk used. It - can also be set to an absolute byte value (like 500mb) to prevent ES from - allocating shards if less than the configured amount of space is available. + Controls the low watermark for disk usage. It defaults to `85%`, meaning + that Elasticsearch will not allocate shards to nodes that have more than + 85% disk used. It can also be set to an absolute byte value (like `500mb`) + to prevent Elasticsearch from allocating shards if less than the specified + amount of space is available. This setting has no effect on the primary + shards of newly-created indices or, specifically, any shards that have + never previously been allocated. `cluster.routing.allocation.disk.watermark.high`:: - Controls the high watermark. It defaults to 90%, meaning ES will attempt to - relocate shards to another node if the node disk usage rises above 90%. It can - also be set to an absolute byte value (similar to the low watermark) to - relocate shards once less than the configured amount of space is available on - the node. + Controls the high watermark. It defaults to `90%`, meaning that + Elasticsearch will attempt to relocate shards away from a node whose disk + usage is above 90%. It can also be set to an absolute byte value (similarly + to the low watermark) to relocate shards away from a node if it has less + than the specified amount of free space. This setting affects the + allocation of all shards, whether previously allocated or not. `cluster.routing.allocation.disk.watermark.flood_stage`:: + -- -Controls the flood stage watermark.
It defaults to 95%, meaning ES enforces -a read-only index block (`index.blocks.read_only_allow_delete`) on every -index that has one or more shards allocated on the node that has at least -one disk exceeding the flood stage. This is a last resort to prevent nodes -from running out of disk space. The index block must be released manually -once there is enough disk space available to allow indexing operations to -continue. +Controls the flood stage watermark. It defaults to 95%, meaning that +Elasticsearch enforces a read-only index block +(`index.blocks.read_only_allow_delete`) on every index that has one or more +shards allocated on the node that has at least one disk exceeding the flood +stage. This is a last resort to prevent nodes from running out of disk space. +The index block must be released manually once there is enough disk space +available to allow indexing operations to continue. NOTE: You can not mix the usage of percentage values and byte values within these settings. Either all are set to percentage values, or all are set to byte @@ -67,12 +71,12 @@ PUT /twitter/_settings `cluster.routing.allocation.disk.include_relocations`:: Defaults to +true+, which means that Elasticsearch will take into account - shards that are currently being relocated to the target node when computing a - node's disk usage. Taking relocating shards' sizes into account may, however, - mean that the disk usage for a node is incorrectly estimated on the high side, - since the relocation could be 90% complete and a recently retrieved disk usage - would include the total size of the relocating shard as well as the space - already used by the running relocation. + shards that are currently being relocated to the target node when computing + a node's disk usage. Taking relocating shards' sizes into account may, + however, mean that the disk usage for a node is incorrectly estimated on + the high side, since the relocation could be 90% complete and a recently + retrieved disk usage would include the total size of the relocating shard + as well as the space already used by the running relocation. NOTE: Percentage values refer to used disk space, while byte values refer to diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc index ea3f99debb94e..693d537d732c1 100644 --- a/docs/reference/modules/snapshots.asciidoc +++ b/docs/reference/modules/snapshots.asciidoc @@ -44,12 +44,12 @@ If you register same snapshot repository with multiple clusters, only one cluster should have write access to the repository. All other clusters connected to that repository should set the repository to `readonly` mode. -NOTE: The snapshot format can change across major versions, so if you have -clusters on different major versions trying to write the same repository, -new snapshots written by one version will not be visible to the other. While -setting the repository to `readonly` on all but one of the clusters should work -with multiple clusters differing by one major version, it is not a supported -configuration. +IMPORTANT: The snapshot format can change across major versions, so if you have +clusters on different versions trying to write the same repository, snapshots +written by one version may not be visible to the other and the repository could +be corrupted. While setting the repository to `readonly` on all but one of the +clusters should work with multiple clusters differing by one major version, it +is not a supported configuration. 
[source,js] ----------------------------------- diff --git a/docs/reference/release-notes/6.0.0-alpha1-5x.asciidoc b/docs/reference/release-notes/6.0.0-alpha1-5x.asciidoc deleted file mode 100644 index c44fd17b69057..0000000000000 --- a/docs/reference/release-notes/6.0.0-alpha1-5x.asciidoc +++ /dev/null @@ -1,1108 +0,0 @@ -[[release-notes-6.0.0-alpha1-5x]] -== 6.0.0-alpha1 Release Notes (Changes previously released in 5.x) - -The changes listed below were first released in the 5.x series. Changes -released for the first time in Elasticsearch 6.0.0-alpha1 are listed in -<>. - -[[breaking-6.0.0-alpha1-5x]] -[float] -=== Breaking changes - -Aliases:: -* Validate alias names the same as index names {pull}20771[#20771] (issue: {issue}20748[#20748]) - -CRUD:: -* Fixed naming inconsistency for fields/stored_fields in the APIs {pull}20166[#20166] (issues: {issue}18943[#18943], {issue}20155[#20155]) - -Core:: -* Add system call filter bootstrap check {pull}21940[#21940] -* Remove ignore system bootstrap checks {pull}20511[#20511] - -Internal:: -* `_flush` should block by default {pull}20597[#20597] (issue: {issue}20569[#20569]) - -Packaging:: -* Rename service.bat to elasticsearch-service.bat {pull}20496[#20496] (issue: {issue}17528[#17528]) - -Plugin Lang Painless:: -* Remove all date 'now' methods from Painless {pull}20766[#20766] (issue: {issue}20762[#20762]) - -Query DSL:: -* Fix name of `enabled_position_increments` {pull}22895[#22895] - -REST:: -* Change separator for shards preference {pull}20786[#20786] (issues: {issue}20722[#20722], {issue}20769[#20769]) - -Search:: -* Remove DFS_QUERY_AND_FETCH as a search type {pull}22787[#22787] - -Settings:: -* Remove support for default settings {pull}24093[#24093] (issues: {issue}23981[#23981], {issue}24052[#24052], {issue}24074[#24074]) - - - -[[breaking-java-6.0.0-alpha1-5x]] -[float] -=== Breaking Java changes - -Aggregations:: -* Move getProperty method out of MultiBucketsAggregation.Bucket interface {pull}23988[#23988] -* Remove getProperty method from Aggregations interface and impl {pull}23972[#23972] -* Move getProperty method out of Aggregation interface {pull}23949[#23949] - -Allocation:: -* Cluster Explain API uses the allocation process to explain shard allocation decisions {pull}22182[#22182] (issues: {issue}20347[#20347], {issue}20634[#20634], {issue}21103[#21103], {issue}21662[#21662], {issue}21691[#21691]) - -Cluster:: -* Remove PROTO-based custom cluster state components {pull}22336[#22336] (issue: {issue}21868[#21868]) - -Core:: -* Remove ability to plug-in TransportService {pull}20505[#20505] - -Discovery:: -* Remove pluggability of ElectMasterService {pull}21031[#21031] - -Exceptions:: -* Remove `IndexTemplateAlreadyExistsException` and `IndexShardAlreadyExistsException` {pull}21539[#21539] (issue: {issue}21494[#21494]) -* Replace IndexAlreadyExistsException with ResourceAlreadyExistsException {pull}21494[#21494] - -Ingest:: -* Change type of ingest doc meta-data field 'TIMESTAMP' to `Date` {pull}22234[#22234] (issue: {issue}22074[#22074]) - -Internal:: -* Replace SearchExtRegistry with namedObject {pull}22492[#22492] -* Replace Suggesters with namedObject {pull}22491[#22491] -* Consolidate the last easy parser construction {pull}22095[#22095] -* Introduce XContentParser#namedObject {pull}22003[#22003] -* Pass executor name to request interceptor to support async intercept calls {pull}21089[#21089] -* Remove TransportService#registerRequestHandler leniency {pull}20469[#20469] (issue: {issue}20468[#20468]) - -Java API:: -* Fold 
InternalSearchHits and friends into their interfaces {pull}23042[#23042] - -Network:: -* Remove HttpServer and HttpServerAdapter in favor of a simple dispatch method {pull}22636[#22636] (issue: {issue}18482[#18482]) -* Unguice Transport and friends {pull}20526[#20526] - -Plugins:: -* Deguice rest handlers {pull}22575[#22575] -* Plugins: Replace Rest filters with RestHandler wrapper {pull}21905[#21905] -* Plugins: Remove support for onModule {pull}21416[#21416] -* Cleanup sub fetch phase extension point {pull}20382[#20382] - -Query DSL:: -* Resolve index names in indices_boost {pull}21393[#21393] (issue: {issue}4756[#4756]) - -Scripting:: -* Refactor ScriptType to be a Top-Level Class {pull}21136[#21136] - -Search:: -* Remove QUERY_AND_FETCH search type {pull}22996[#22996] -* Cluster search shards improvements: expose ShardId, adjust visibility of some members {pull}21752[#21752] - - - -[[deprecation-6.0.0-alpha1-5x]] -[float] -=== Deprecations - -Java API:: -* Add BulkProcessor methods with XContentType parameter {pull}23078[#23078] (issue: {issue}22691[#22691]) -* Deprecate and remove "minimumNumberShouldMatch" in BoolQueryBuilder {pull}22403[#22403] - -Plugin Repository S3:: -* S3 Repository: Deprecate remaining `repositories.s3.*` settings {pull}24144[#24144] (issue: {issue}24143[#24143]) -* Deprecate specifying credentials through env vars, sys props, and remove profile files {pull}22567[#22567] (issues: {issue}21041[#21041], {issue}22479[#22479]) - -Query DSL:: -* Add deprecation logging message for 'fuzzy' query {pull}20993[#20993] (issue: {issue}15760[#15760]) - -REST:: -* Optionally require a valid content type for all rest requests with content {pull}22691[#22691] (issue: {issue}19388[#19388]) - -Scripting:: -* Change Namespace for Stored Script to Only Use Id {pull}22206[#22206] - -Shadow Replicas:: -* Add a deprecation notice to shadow replicas {pull}22647[#22647] (issue: {issue}22024[#22024]) - -Stats:: -* Deprecate _field_stats endpoint {pull}23914[#23914] - - - -[[feature-6.0.0-alpha1-5x]] -[float] -=== New features - -Aggregations:: -* Initial version of an adjacency matrix using the Filters aggregation {pull}22239[#22239] (issue: {issue}22169[#22169]) - -Analysis:: -* Adds pattern keyword marker filter support {pull}23600[#23600] (issue: {issue}4877[#4877]) -* Expose WordDelimiterGraphTokenFilter {pull}23327[#23327] (issue: {issue}23104[#23104]) -* Synonym Graph Support (LUCENE-6664) {pull}21517[#21517] -* Expose Lucenes Ukrainian analyzer {pull}21176[#21176] (issue: {issue}19433[#19433]) - -CAT API:: -* Provides a cat api endpoint for templates. {pull}20545[#20545] (issue: {issue}20467[#20467]) - -CRUD:: -* Allow an index to be partitioned with custom routing {pull}22274[#22274] (issue: {issue}21585[#21585]) - -Highlighting:: -* Integrate UnifiedHighlighter {pull}21621[#21621] (issue: {issue}21376[#21376]) - -Index APIs:: -* Add FieldCapabilities (_field_caps) API {pull}23007[#23007] (issue: {issue}22438[#22438]) - -Ingest:: -* introduce KV Processor in Ingest Node {pull}22272[#22272] (issue: {issue}22222[#22222]) - -Mapping:: -* Add the ability to set a normalizer on keyword fields. 
{pull}21919[#21919] (issue: {issue}18064[#18064]) -* Add RangeFieldMapper for numeric and date range types {pull}21002[#21002] (issue: {issue}20999[#20999]) - -Plugin Discovery File:: -* File-based discovery plugin {pull}20394[#20394] (issue: {issue}20323[#20323]) - -Query DSL:: -* Add "all fields" execution mode to simple_query_string query {pull}21341[#21341] (issues: {issue}19784[#19784], {issue}20925[#20925]) -* Add support for `quote_field_suffix` to `simple_query_string`. {pull}21060[#21060] (issue: {issue}18641[#18641]) -* Add "all field" execution mode to query_string query {pull}20925[#20925] (issue: {issue}19784[#19784]) - -Reindex API:: -* Add automatic parallelization support to reindex and friends {pull}20767[#20767] (issue: {issue}20624[#20624]) - -Search:: -* Introduce incremental reduction of TopDocs {pull}23946[#23946] -* Add federated cross cluster search capabilities {pull}22502[#22502] (issue: {issue}21473[#21473]) -* Add field collapsing for search request {pull}22337[#22337] (issue: {issue}21833[#21833]) - -Settings:: -* Add infrastructure for elasticsearch keystore {pull}22335[#22335] - -Similarities:: -* Adds boolean similarity to Elasticsearch {pull}23637[#23637] (issue: {issue}6731[#6731]) - - - -[[enhancement-6.0.0-alpha1-5x]] -[float] -=== Enhancements - -Aggregations:: -* Add `count` to rest output of `geo_centroid` {pull}24387[#24387] (issue: {issue}24366[#24366]) -* Allow scripted metric agg to access `_score` {pull}24295[#24295] -* Add BucketMetricValue interface {pull}24188[#24188] -* Move aggs CommonFields and TYPED_KEYS_DELIMITER from InternalAggregation to Aggregation {pull}23987[#23987] -* Use ParseField for aggs CommonFields rather than String {pull}23717[#23717] -* Share XContent rendering code in terms aggs {pull}23680[#23680] -* Add unit tests for ParentToChildAggregator {pull}23305[#23305] (issue: {issue}22278[#22278]) -* First step towards incremental reduction of query responses {pull}23253[#23253] -* `value_type` is useful regardless of scripting. 
{pull}22160[#22160] (issue: {issue}20163[#20163]) -* Support for partitioning set of terms {pull}21626[#21626] (issue: {issue}21487[#21487]) -* Rescorer should be applied in the TopHits aggregation {pull}20978[#20978] (issue: {issue}19317[#19317]) - -Aliases:: -* Handle multiple aliases in _cat/aliases api {pull}23698[#23698] (issue: {issue}23661[#23661]) - -Allocation:: -* Trigger replica recovery restarts by master when primary relocation completes {pull}23926[#23926] (issue: {issue}23904[#23904]) -* Makes the same_shard host dynamically updatable {pull}23397[#23397] (issue: {issue}22992[#22992]) -* Include stale replica shard info when explaining an unassigned primary {pull}22826[#22826] -* Adds setting level to allocation decider explanations {pull}22268[#22268] (issue: {issue}21771[#21771]) -* Improves allocation decider decision explanation messages {pull}21771[#21771] -* Prepares allocator decision objects for use with the allocation explain API {pull}21691[#21691] -* Balance step in BalancedShardsAllocator for a single shard {pull}21103[#21103] -* Process more expensive allocation deciders last {pull}20724[#20724] (issue: {issue}12815[#12815]) -* Separates decision making from decision application in BalancedShardsAllocator {pull}20634[#20634] - -Analysis:: -* Support Keyword type in Analyze API {pull}23161[#23161] -* Expose FlattenGraphTokenFilter {pull}22643[#22643] -* Analyze API Position Length Support {pull}22574[#22574] -* Remove AnalysisService and reduce it to a simple name to analyzer mapping {pull}20627[#20627] (issues: {issue}19827[#19827], {issue}19828[#19828]) - -CAT API:: -* Adding built-in sorting capability to _cat apis. {pull}20658[#20658] (issue: {issue}16975[#16975]) -* Add health status parameter to cat indices API {pull}20393[#20393] - -CRUD:: -* Use correct block levels for TRA subclasses {pull}22224[#22224] -* Make index and delete operation execute as a single bulk item {pull}21964[#21964] - -Cache:: -* Do not cache term queries. {pull}21566[#21566] (issues: {issue}16031[#16031], {issue}20116[#20116]) -* Parse alias filters on the coordinating node {pull}20916[#20916] - -Circuit Breakers:: -* Closing a ReleasableBytesStreamOutput closes the underlying BigArray {pull}23941[#23941] -* Add used memory amount to CircuitBreakingException message (#22521) {pull}22693[#22693] (issue: {issue}22521[#22521]) -* Cluster Settings Updates should not trigger circuit breakers. 
{pull}20827[#20827] - -Cluster:: -* Extract a common base class to allow services to listen to remote cluster config updates {pull}24367[#24367] -* Prevent nodes from joining if newer indices exist in the cluster {pull}23843[#23843] -* Connect to new nodes concurrently {pull}22984[#22984] (issue: {issue}22828[#22828]) -* Keep NodeConnectionsService in sync with current nodes in the cluster state {pull}22509[#22509] -* Add a generic way of checking version before serializing custom cluster object {pull}22376[#22376] (issue: {issue}22313[#22313]) -* Add validation for supported index version on node join, restore, upgrade & open index {pull}21830[#21830] (issue: {issue}21670[#21670]) -* Let ClusterStateObserver only hold onto state that's needed for change detection {pull}21631[#21631] (issue: {issue}21568[#21568]) -* Cache successful shard deletion checks {pull}21438[#21438] -* Remove mutable status field from cluster state {pull}21379[#21379] -* Skip shard management code when updating cluster state on client/tribe nodes {pull}20731[#20731] -* Add clusterUUID to RestMainAction output {pull}20503[#20503] - -Core:: -* Regex upgrades {pull}24316[#24316] (issue: {issue}24226[#24226]) -* Detect remnants of path.data/default.path.data bug {pull}24099[#24099] (issues: {issue}23981[#23981], {issue}24052[#24052], {issue}24074[#24074], {issue}24093[#24093]) -* Await termination after shutting down executors {pull}23889[#23889] -* Add early-access check {pull}23743[#23743] (issue: {issue}23668[#23668]) -* Adapter action future should restore interrupts {pull}23618[#23618] (issue: {issue}23617[#23617]) -* Disable bootstrap checks for single-node discovery {pull}23598[#23598] (issues: {issue}23585[#23585], {issue}23595[#23595]) -* Enable explicitly enforcing bootstrap checks {pull}23585[#23585] (issue: {issue}21864[#21864]) -* Add equals/hashcode method to ReplicationResponse {pull}23215[#23215] -* Simplify ElasticsearchException rendering as a XContent {pull}22611[#22611] -* Remove setLocalNode from ClusterService and TransportService {pull}22608[#22608] -* Rename bootstrap.seccomp to bootstrap.system_call_filter {pull}22226[#22226] (issue: {issue}21940[#21940]) -* Cleanup random stats serialization code {pull}22223[#22223] -* Avoid corruption when deserializing booleans {pull}22152[#22152] -* Reduce memory pressure when sending large terms queries. 
{pull}21776[#21776] -* Install a security manager on startup {pull}21716[#21716] -* Log node ID on startup {pull}21673[#21673] -* Ensure source filtering automatons are only compiled once {pull}20857[#20857] (issue: {issue}20839[#20839]) -* Improve scheduling fairness when batching cluster state changes with equal priority {pull}20775[#20775] (issue: {issue}20768[#20768]) -* Add production warning for pre-release builds {pull}20674[#20674] -* Add serial collector bootstrap check {pull}20558[#20558] -* Do not log full bootstrap checks exception {pull}19989[#19989] - -Dates:: -* Improve error handling for epoch format parser with time zone (#22621) {pull}23689[#23689] - -Discovery:: -* Introduce single-node discovery {pull}23595[#23595] -* UnicastZenPing shouldn't ping the address of the local node {pull}23567[#23567] -* MasterFaultDetection can start after the initial cluster state has been processed {pull}23037[#23037] (issue: {issue}22828[#22828]) -* Simplify Unicast Zen Ping {pull}22277[#22277] (issues: {issue}19370[#19370], {issue}21739[#21739], {issue}22120[#22120], {issue}22194[#22194]) -* Prefer joining node with conflicting transport address when becoming master {pull}22134[#22134] (issues: {issue}22049[#22049], {issue}22120[#22120]) - -Engine:: -* Engine: store maxUnsafeAutoIdTimestamp in commit {pull}24149[#24149] -* Replace EngineClosedException with AlreadyClosedExcpetion {pull}22631[#22631] - -Exceptions:: -* Add BWC layer for Exceptions {pull}21694[#21694] (issue: {issue}21656[#21656]) - -Geo:: -* Optimize geo-distance sorting. {pull}20596[#20596] (issue: {issue}20450[#20450]) - -Highlighting:: -* Add support for fragment_length in the unified highlighter {pull}23431[#23431] -* Add BreakIteratorBoundaryScanner support {pull}23248[#23248] - -Index APIs:: -* Open and close index to honour allow_no_indices option {pull}24222[#24222] (issue: {issue}24031[#24031]) -* Wildcard cluster names for cross cluster search {pull}23985[#23985] (issue: {issue}23893[#23893]) -* Indexing: Add shard id to indexing operation listener {pull}22606[#22606] -* Better error when can't auto create index {pull}22488[#22488] (issues: {issue}21448[#21448], {issue}22435[#22435]) -* Add date-math support to `_rollover` {pull}20709[#20709] - -Ingest:: -* Lazy load the geoip databases {pull}23337[#23337] -* add `ignore_missing` flag to ingest plugins {pull}22273[#22273] -* Added ability to remove pipelines via wildcards (#22149) {pull}22191[#22191] (issue: {issue}22149[#22149]) -* Enables the ability to inject serialized json fields into root of document {pull}22179[#22179] (issue: {issue}21898[#21898]) -* compile ScriptProcessor inline scripts when creating ingest pipelines {pull}21858[#21858] (issue: {issue}21842[#21842]) -* add `ignore_missing` option to SplitProcessor {pull}20982[#20982] (issues: {issue}19995[#19995], {issue}20840[#20840]) -* add ignore_missing option to convert,trim,lowercase,uppercase,grok,rename {pull}20194[#20194] (issue: {issue}19995[#19995]) -* introduce the JSON Processor {pull}20128[#20128] (issue: {issue}20052[#20052]) - -Internal:: -* Add cross cluster support to `_field_caps` {pull}24463[#24463] (issue: {issue}24334[#24334]) -* Log JVM arguments on startup {pull}24451[#24451] -* Preserve cluster alias throughout search execution to lookup nodes by cluster and ID {pull}24438[#24438] -* Move RemoteClusterService into TransportService {pull}24424[#24424] -* Enum related performance additions. 
{pull}24274[#24274] (issue: {issue}24226[#24226]) -* Add a dedicated TransportRemoteInfoAction for consistency {pull}24040[#24040] (issue: {issue}23969[#23969]) -* Simplify sorted top docs merging in SearchPhaseController {pull}23881[#23881] -* Synchronized CollapseTopFieldDocs with lucenes relatives {pull}23854[#23854] -* Cleanup SearchPhaseController interface {pull}23844[#23844] -* Do not create String instances in 'Strings' methods accepting StringBuilder {pull}22907[#22907] -* Improve connection closing in `RemoteClusterConnection` {pull}22804[#22804] (issue: {issue}22803[#22803]) -* Remove some more usages of ParseFieldMatcher {pull}22437[#22437] (issues: {issue}19552[#19552], {issue}22130[#22130]) -* Remove some more usages of ParseFieldMatcher {pull}22398[#22398] (issues: {issue}19552[#19552], {issue}22130[#22130]) -* Remove some more usages of ParseFieldMatcher {pull}22395[#22395] (issues: {issue}19552[#19552], {issue}22130[#22130]) -* Remove some ParseFieldMatcher usages {pull}22389[#22389] (issues: {issue}19552[#19552], {issue}22130[#22130]) -* Introduce ToXContentObject interface {pull}22387[#22387] (issue: {issue}16347[#16347]) -* Add infrastructure to manage network connections outside of Transport/TransportService {pull}22194[#22194] -* Replace strict parsing mode with response headers assertions {pull}22130[#22130] (issues: {issue}11859[#11859], {issue}19552[#19552], {issue}20993[#20993]) -* Start using `ObjectParser` for aggs. {pull}22048[#22048] (issue: {issue}22009[#22009]) -* Don't output null source node in RecoveryFailedException {pull}21963[#21963] -* ClusterService should expose "applied" cluster states (i.e., remove ClusterStateStatus) {pull}21817[#21817] -* Rename ClusterState#lookupPrototypeSafe to `lookupPrototype` and remove "unsafe" unused variant {pull}21686[#21686] -* ShardActiveResponseHandler shouldn't hold to an entire cluster state {pull}21470[#21470] (issue: {issue}21394[#21394]) -* Remove unused ClusterService dependency from SearchPhaseController {pull}21421[#21421] -* Remove special case in case no action filters are registered {pull}21251[#21251] -* Use TimveValue instead of long for CacheBuilder methods {pull}20887[#20887] -* Remove SearchContext#current and all it's threadlocals {pull}20778[#20778] (issue: {issue}19341[#19341]) -* Remove poor-mans compression in InternalSearchHit and friends {pull}20472[#20472] - -Java API:: -* Added types options to DeleteByQueryRequest {pull}23265[#23265] (issue: {issue}21984[#21984]) -* prevent NPE when trying to uncompress a null BytesReference {pull}22386[#22386] - -Java High Level REST Client:: -* Add utility method to parse named XContent objects with typed prefix {pull}24240[#24240] (issue: {issue}22965[#22965]) -* Convert suggestion response parsing to use NamedXContentRegistry {pull}23355[#23355] -* UpdateRequest implements ToXContent {pull}23289[#23289] -* Add javadoc for DocWriteResponse.Builders {pull}23267[#23267] -* Expose WriteRequest.RefreshPolicy string representation {pull}23106[#23106] -* Use `typed_keys` parameter to prefix suggester names by type in search responses {pull}23080[#23080] (issue: {issue}22965[#22965]) -* Add parsing from xContent to MainResponse {pull}22934[#22934] -* Parse elasticsearch exception's root causes {pull}22924[#22924] -* Add parsing method to BytesRestResponse's error {pull}22873[#22873] -* Add parsing methods to BulkItemResponse {pull}22859[#22859] -* Add parsing method for ElasticsearchException.generateFailureXContent() {pull}22815[#22815] -* Add parsing method 
for ElasticsearchException.generateThrowableXContent() {pull}22783[#22783] -* Add parsing methods for UpdateResponse {pull}22586[#22586] -* Add parsing from xContent to InternalSearchHit and InternalSearchHits {pull}22429[#22429] -* Add fromxcontent methods to index response {pull}22229[#22229] -* Add fromXContent() methods for ReplicationResponse {pull}22196[#22196] (issue: {issue}22082[#22082]) -* Add parsing method for ElasticsearchException {pull}22143[#22143] -* Add fromXContent method to GetResponse {pull}22082[#22082] - -Java REST Client:: -* move ignore parameter support from yaml test client to low level rest client {pull}22637[#22637] -* Warn log deprecation warnings received from server {pull}21895[#21895] -* Support Preemptive Authentication with RestClient {pull}21336[#21336] -* Provide error message when rest request path is null {pull}21233[#21233] (issue: {issue}21232[#21232]) - -Logging:: -* Log deleting indices at info level {pull}22627[#22627] (issue: {issue}22605[#22605]) -* Expose logs base path {pull}22625[#22625] -* Log failure to connect to node at info instead of debug {pull}21809[#21809] (issue: {issue}6468[#6468]) -* Truncate log messages from the end {pull}21609[#21609] (issue: {issue}21602[#21602]) -* Ensure logging is initialized in CLI tools {pull}20575[#20575] -* Give useful error message if log config is missing {pull}20493[#20493] -* Complete Elasticsearch logger names {pull}20457[#20457] (issue: {issue}20326[#20326]) -* Logging shutdown hack {pull}20389[#20389] (issue: {issue}20304[#20304]) -* Disable console logging {pull}20387[#20387] -* Warn on not enough masters during election {pull}20063[#20063] (issue: {issue}8362[#8362]) - -Mapping:: -* Do not index `_type` when there is at most one type. {pull}24363[#24363] -* Only allow one type on 6.0 indices {pull}24317[#24317] (issue: {issue}15613[#15613]) -* token_count type : add an option to count tokens (fix #23227) {pull}24175[#24175] (issue: {issue}23227[#23227]) -* Atomic mapping updates across types {pull}22220[#22220] -* Only update DocumentMapper if field type changes {pull}22165[#22165] -* Better error message when _parent isn't an object {pull}21987[#21987] -* Create the QueryShardContext lazily in DocumentMapperParser. {pull}21287[#21287] - -Nested Docs:: -* Avoid adding unnecessary nested filters when ranges are used. 
{pull}23427[#23427] - -Network:: -* Set available processors for Netty {pull}24420[#24420] (issue: {issue}6224[#6224]) -* Adjust default Netty receive predictor size to 64k {pull}23542[#23542] (issue: {issue}23185[#23185]) -* Keep the pipeline handler queue small initially {pull}23335[#23335] -* Set network receive predictor size to 32kb {pull}23284[#23284] (issue: {issue}23185[#23185]) -* TransportService.connectToNode should validate remote node ID {pull}22828[#22828] (issue: {issue}22194[#22194]) -* Disable the Netty recycler {pull}22452[#22452] (issues: {issue}22189[#22189], {issue}22360[#22360], {issue}22406[#22406], {issue}5904[#5904]) -* Tell Netty not to be unsafe in transport client {pull}22284[#22284] -* Introduce a low level protocol handshake {pull}22094[#22094] -* Detach handshake from connect to node {pull}22037[#22037] -* Reduce number of connections per node depending on the nodes role {pull}21849[#21849] -* Add a connect timeout to the ConnectionProfile to allow per node connect timeouts {pull}21847[#21847] (issue: {issue}19719[#19719]) -* Grant Netty permission to read system somaxconn {pull}21840[#21840] -* Remove connectToNodeLight and replace it with a connection profile {pull}21799[#21799] -* Lazy resolve unicast hosts {pull}21630[#21630] (issues: {issue}14441[#14441], {issue}16412[#16412]) -* Fix handler name on message not fully read {pull}21478[#21478] -* Handle rejected pings on shutdown gracefully {pull}20842[#20842] -* Network: Allow to listen on virtual interfaces. {pull}19568[#19568] (issues: {issue}17473[#17473], {issue}19537[#19537]) - -Packaging:: -* Introduce Java version check {pull}23194[#23194] (issue: {issue}21102[#21102]) -* Improve the out-of-the-box experience {pull}21920[#21920] (issues: {issue}18317[#18317], {issue}21783[#21783]) -* Add empty plugins dir for archive distributions {pull}21204[#21204] (issue: {issue}20342[#20342]) -* Make explicit missing settings for Windows service {pull}21200[#21200] (issue: {issue}18317[#18317]) -* Change permissions on config files {pull}20966[#20966] -* Add quiet option to disable console logging {pull}20422[#20422] (issues: {issue}15315[#15315], {issue}16159[#16159], {issue}17220[#17220]) - -Percolator:: -* Allowing range queries with now ranges inside percolator queries {pull}23921[#23921] (issue: {issue}23859[#23859]) -* Add term extraction support for MultiPhraseQuery {pull}23176[#23176] - -Plugin Discovery EC2:: -* Settings: Migrate ec2 discovery sensitive settings to elasticsearch keystore {pull}23961[#23961] (issue: {issue}22475[#22475]) -* Add support for ca-central-1 region to EC2 and S3 plugins {pull}22458[#22458] (issue: {issue}22454[#22454]) -* Support for eu-west-2 (London) cloud-aws plugin {pull}22308[#22308] (issue: {issue}22306[#22306]) -* Add us-east-2 AWS region {pull}21961[#21961] (issue: {issue}21881[#21881]) -* Add setting to set read timeout for EC2 discovery and S3 repository plugins {pull}21956[#21956] (issue: {issue}19078[#19078]) - -Plugin Ingest GeoIp:: -* Cache results of geoip lookups {pull}22231[#22231] (issue: {issue}22074[#22074]) - -Plugin Lang Painless:: -* Allow painless to load stored fields {pull}24290[#24290] -* Start on custom whitelists for Painless {pull}23563[#23563] -* Fix Painless's implementation of interfaces returning primitives {pull}23298[#23298] (issue: {issue}22983[#22983]) -* Allow painless to implement more interfaces {pull}22983[#22983] -* Generate reference links for painless API {pull}22775[#22775] -* Painless: Add augmentation to String for base 64 
{pull}22665[#22665] (issue: {issue}22648[#22648]) -* Improve painless's ScriptException generation {pull}21762[#21762] (issue: {issue}21733[#21733]) -* Add Debug.explain to painless {pull}21723[#21723] (issue: {issue}20263[#20263]) -* Implement the ?: operator in painless {pull}21506[#21506] -* In painless suggest a long constant if int won't do {pull}21415[#21415] (issue: {issue}21313[#21313]) -* Support decimal constants with trailing [dD] in painless {pull}21412[#21412] (issue: {issue}21116[#21116]) -* Implement reading from null safe dereferences {pull}21239[#21239] -* Painless negative offsets {pull}21080[#21080] (issue: {issue}20870[#20870]) -* Remove more equivalents of the now method from the Painless whitelist. {pull}21047[#21047] -* Disable regexes by default in painless {pull}20427[#20427] (issue: {issue}20397[#20397]) - -Plugin Repository Azure:: -* Add Backoff policy to azure repository {pull}23387[#23387] (issue: {issue}22728[#22728]) - -Plugin Repository S3:: -* Removes the retry mechanism from the S3 blob store {pull}23952[#23952] (issue: {issue}22845[#22845]) -* S3 Repository: Eagerly load static settings {pull}23910[#23910] -* S3 repository: Add named configurations {pull}22762[#22762] (issues: {issue}22479[#22479], {issue}22520[#22520]) -* Make the default S3 buffer size depend on the available memory. {pull}21299[#21299] - -Plugins:: -* Plugins: Add support for platform specific plugins {pull}24265[#24265] -* Plugins: Remove leniency for missing plugins dir {pull}24173[#24173] -* Modify permissions dialog for plugins {pull}23742[#23742] -* Plugins: Add plugin cli specific exit codes {pull}23599[#23599] (issue: {issue}15295[#15295]) -* Plugins: Output better error message when existing plugin is incompatible {pull}23562[#23562] (issue: {issue}20691[#20691]) -* Add the ability to define search response listeners in search plugin {pull}22682[#22682] -* Pass ThreadContext to transport interceptors to allow header modification {pull}22618[#22618] (issue: {issue}22585[#22585]) -* Provide helpful error message if a plugin exists {pull}22305[#22305] (issue: {issue}22084[#22084]) -* Add shutdown hook for closing CLI commands {pull}22126[#22126] (issue: {issue}22111[#22111]) -* Allow plugins to install bootstrap checks {pull}22110[#22110] -* Clarify that plugins can be closed {pull}21669[#21669] -* Plugins: Convert custom discovery to pull based plugin {pull}21398[#21398] -* Removing plugin that isn't installed shouldn't trigger usage information {pull}21272[#21272] (issue: {issue}21250[#21250]) -* Remove pluggability of ZenPing {pull}21049[#21049] -* Make UnicastHostsProvider extension pull based {pull}21036[#21036] -* Revert "Display plugins versions" {pull}20807[#20807] (issues: {issue}18683[#18683], {issue}20668[#20668]) -* Provide error message when plugin id is missing {pull}20660[#20660] - -Query DSL:: -* Make it possible to validate a query on all shards instead of a single random shard {pull}23697[#23697] (issue: {issue}18254[#18254]) -* QueryString and SimpleQueryString Graph Support {pull}22541[#22541] -* Additional Graph Support in Match Query {pull}22503[#22503] (issue: {issue}22490[#22490]) -* RangeQuery WITHIN case now normalises query {pull}22431[#22431] (issue: {issue}22412[#22412]) -* Un-deprecate fuzzy query {pull}22088[#22088] (issue: {issue}15760[#15760]) -* support numeric bounds with decimal parts for long/integer/short/byte datatypes {pull}21972[#21972] (issue: {issue}21600[#21600]) -* Using ObjectParser in MatchAllQueryBuilder and IdsQueryBuilder 
{pull}21273[#21273] -* Expose splitOnWhitespace in `Query String Query` {pull}20965[#20965] (issue: {issue}20841[#20841]) -* Throw error if query element doesn't end with END_OBJECT {pull}20528[#20528] (issue: {issue}20515[#20515]) -* Remove `lowercase_expanded_terms` and `locale` from query-parser options. {pull}20208[#20208] (issue: {issue}9978[#9978]) - -REST:: -* Allow passing single scrollID in clear scroll API body {pull}24242[#24242] (issue: {issue}24233[#24233]) -* Validate top-level keys when parsing mget requests {pull}23746[#23746] (issue: {issue}23720[#23720]) -* Cluster stats should not render empty http/transport types {pull}23735[#23735] -* Add parameter to prefix aggs name with type in search responses {pull}22965[#22965] -* Add a REST spec for the create API {pull}20924[#20924] -* Add response params to REST params did you mean {pull}20753[#20753] (issues: {issue}20722[#20722], {issue}20747[#20747]) -* Add did you mean to strict REST params {pull}20747[#20747] (issue: {issue}20722[#20722]) - -Reindex API:: -* Increase visibility of doExecute so it can be used directly {pull}22614[#22614] -* Improve error message when reindex-from-remote gets bad json {pull}22536[#22536] (issue: {issue}22330[#22330]) -* Reindex: Better error message for pipeline in wrong place {pull}21985[#21985] -* Timeout improvements for rest client and reindex {pull}21741[#21741] (issue: {issue}21707[#21707]) -* Add "simple match" support for reindex-from-remote whitelist {pull}21004[#21004] -* Make reindex-from-remote ignore unknown fields {pull}20591[#20591] (issue: {issue}20504[#20504]) - -Scripting:: -* Expose multi-valued dates to scripts and document painless's date functions {pull}22875[#22875] (issue: {issue}22162[#22162]) -* Wrap VerifyError in ScriptException {pull}21769[#21769] -* Log ScriptException's xcontent if file script compilation fails {pull}21767[#21767] (issue: {issue}21733[#21733]) -* Support binary field type in script values {pull}21484[#21484] (issue: {issue}14469[#14469]) -* Mustache: Add {{#url}}{{/url}} function to URL encode strings {pull}20838[#20838] -* Expose `ctx._now` in update scripts {pull}20835[#20835] (issue: {issue}17895[#17895]) - -Search:: -* Remove leniency when merging fetched hits in a search response phase {pull}24158[#24158] -* Set shard count limit to unlimited {pull}24012[#24012] -* Streamline shard index availability in all SearchPhaseResults {pull}23788[#23788] -* Search took time should use a relative clock {pull}23662[#23662] -* Prevent negative `from` parameter in SearchSourceBuilder {pull}23358[#23358] (issue: {issue}23324[#23324]) -* Remove unnecessary result sorting in SearchPhaseController {pull}23321[#23321] -* Expose `batched_reduce_size` via `_search` {pull}23288[#23288] (issue: {issue}23253[#23253]) -* Adding fromXContent to Suggest and Suggestion class {pull}23226[#23226] (issue: {issue}23202[#23202]) -* Adding fromXContent to Suggestion.Entry and subclasses {pull}23202[#23202] -* Add CollapseSearchPhase as a successor for the FetchSearchPhase {pull}23165[#23165] -* Integrate IndexOrDocValuesQuery. {pull}23119[#23119] -* Detach SearchPhases from AbstractSearchAsyncAction {pull}23118[#23118] -* Fix GraphQuery expectation after Lucene upgrade to 6.5 {pull}23117[#23117] (issue: {issue}23102[#23102]) -* Nested queries should avoid adding unnecessary filters when possible. 
{pull}23079[#23079] (issue: {issue}20797[#20797]) -* Add xcontent parsing to completion suggestion option {pull}23071[#23071] -* Add xcontent parsing to suggestion options {pull}23018[#23018] -* Separate reduce (aggs, suggest and profile) from merging fetched hits {pull}23017[#23017] -* Add a setting to disable remote cluster connections on a node {pull}23005[#23005] -* First step towards separating individual search phases {pull}22802[#22802] -* Add parsing from xContent to SearchProfileShardResults and nested classes {pull}22649[#22649] -* Move SearchTransportService and SearchPhaseController creation outside of TransportSearchAction constructor {pull}21754[#21754] -* Don't carry ShardRouting around when not needed in AbstractSearchAsyncAction {pull}21753[#21753] -* ShardSearchRequest to take ShardId constructor argument rather than the whole ShardRouting {pull}21750[#21750] -* Use index uuid as key in the alias filter map rather than the index name {pull}21749[#21749] -* Add indices and filter information to search shards api output {pull}21738[#21738] (issue: {issue}20916[#20916]) -* remove pointless catch exception in TransportSearchAction {pull}21689[#21689] -* Optimize query with types filter in the URL (t/t/_search) {pull}20979[#20979] -* Makes search action cancelable by task management API {pull}20405[#20405] - -Search Templates:: -* Add profile and explain parameters to template API {pull}20451[#20451] - -Settings:: -* Add secure file setting to keystore {pull}24001[#24001] -* Add a property to mark setting as final {pull}23872[#23872] -* Remove obsolete index setting `index.version.minimum_compatible`. {pull}23593[#23593] -* Provide a method to retrieve a closeable char[] from a SecureString {pull}23389[#23389] -* Update indices settings api to support CBOR and SMILE format {pull}23309[#23309] (issues: {issue}23242[#23242], {issue}23245[#23245]) -* Improve setting deprecation message {pull}23156[#23156] (issue: {issue}22849[#22849]) -* Add secure settings validation on startup {pull}22894[#22894] -* Allow comma delimited array settings to have a space after each entry {pull}22591[#22591] (issue: {issue}22297[#22297]) -* Allow affix settings to be dynamic / updatable {pull}22526[#22526] -* Allow affix settings to delegate to actual settings {pull}22523[#22523] -* Make s3 repository sensitive settings use secure settings {pull}22479[#22479] -* Speed up filter and prefix settings operations {pull}22249[#22249] -* Add precise logging on unknown or invalid settings {pull}20951[#20951] (issue: {issue}20946[#20946]) - -Snapshot/Restore:: -* Ensure every repository has an incompatible-snapshots blob {pull}24403[#24403] (issue: {issue}22267[#22267]) -* Change snapshot status error to use generic SnapshotException {pull}24355[#24355] (issue: {issue}24225[#24225]) -* Duplicate snapshot name throws InvalidSnapshotNameException {pull}22921[#22921] (issue: {issue}18228[#18228]) -* Fixes retrieval of the latest snapshot index blob {pull}22700[#22700] -* Use general cluster state batching mechanism for snapshot state updates {pull}22528[#22528] (issue: {issue}14899[#14899]) -* Synchronize snapshot deletions on the cluster state {pull}22313[#22313] (issue: {issue}19957[#19957]) -* Abort snapshots on a node that leaves the cluster {pull}21084[#21084] (issue: {issue}20876[#20876]) - -Stats:: -* Show JVM arguments {pull}24450[#24450] -* Add cross-cluster search remote cluster info API {pull}23969[#23969] (issue: {issue}23925[#23925]) -* Add geo_point to FieldStats {pull}21947[#21947] (issue: 
{issue}20707[#20707]) -* Include unindexed field in FieldStats response {pull}21821[#21821] (issue: {issue}21952[#21952]) -* Remove load average leniency {pull}21380[#21380] -* Strengthen handling of unavailable cgroup stats {pull}21094[#21094] (issue: {issue}21029[#21029]) -* Add basic cgroup CPU metrics {pull}21029[#21029] - -Suggesters:: -* Provide informative error message in case of unknown suggestion context. {pull}24241[#24241] -* Allow different data types for category in Context suggester {pull}23491[#23491] (issue: {issue}22358[#22358]) - -Task Manager:: -* Limit IndexRequest toString() length {pull}22832[#22832] -* Improve the error message if task and node isn't found {pull}22062[#22062] (issue: {issue}22027[#22027]) -* Add descriptions to create snapshot and restore snapshot tasks. {pull}21901[#21901] (issue: {issue}21768[#21768]) -* Add proper descriptions to reindex, update-by-query and delete-by-query tasks. {pull}21841[#21841] (issue: {issue}21768[#21768]) -* Add search task descriptions {pull}21740[#21740] - -Tribe Node:: -* Add support for merging custom meta data in tribe node {pull}21552[#21552] (issues: {issue}20544[#20544], {issue}20791[#20791], {issue}9372[#9372]) - -Upgrade API:: -* Allow plugins to upgrade templates and index metadata on startup {pull}24379[#24379] - - -[[bug-6.0.0-alpha1-5x]] -[float] -=== Bug fixes - -Aggregations:: -* InternalPercentilesBucket should not rely on ordered percents array {pull}24336[#24336] (issue: {issue}24331[#24331]) -* Align behavior HDR percentiles iterator with percentile() method {pull}24206[#24206] -* The `filter` and `significant_terms` aggregations should parse the `filter` as a filter, not a query. {pull}23797[#23797] -* Completion suggestion should also consider text if prefix/regex is missing {pull}23451[#23451] (issue: {issue}23340[#23340]) -* Fixes the per term error in the terms aggregation {pull}23399[#23399] -* Fixes terms error count for multiple reduce phases {pull}23291[#23291] (issue: {issue}23286[#23286]) -* Fix scaled_float numeric type in aggregations {pull}22351[#22351] (issue: {issue}22350[#22350]) -* Allow terms aggregations on pure boolean scripts. {pull}22201[#22201] (issue: {issue}20941[#20941]) -* Fix numeric terms aggregations with includes/excludes and minDocCount=0 {pull}22141[#22141] (issue: {issue}22140[#22140]) -* Fix `missing` on aggs on `boolean` fields. {pull}22135[#22135] (issue: {issue}22009[#22009]) -* IP range masks exclude the maximum address of the range. {pull}22018[#22018] (issue: {issue}22005[#22005]) -* Fix `other_bucket` on the `filters` agg to be enabled if a key is set. {pull}21994[#21994] (issue: {issue}21951[#21951]) -* Rewrite Queries/Filter in FilterAggregationBuilder and ensure client usage marks query as non-cachable {pull}21303[#21303] (issue: {issue}21301[#21301]) -* Percentiles bucket fails for 100th percentile {pull}21218[#21218] -* Thread safety for scripted significance heuristics {pull}21113[#21113] (issue: {issue}18120[#18120]) -* `ip_range` aggregation should accept null bounds. {pull}21043[#21043] (issue: {issue}21006[#21006]) -* Fixes bug preventing script sort working on top_hits aggregation {pull}21023[#21023] (issue: {issue}21022[#21022]) -* Fixed writeable name from range to geo_distance {pull}20860[#20860] -* Fix date_range aggregation to not cache if now is used {pull}20740[#20740] -* The `top_hits` aggregation should compile scripts only once. 
{pull}20738[#20738] - -Allocation:: -* Discard stale node responses from async shard fetching {pull}24434[#24434] (issue: {issue}24007[#24007]) -* Cannot force allocate primary to a node where the shard already exists {pull}22031[#22031] (issue: {issue}22021[#22021]) -* Promote shadow replica to primary when initializing primary fails {pull}22021[#22021] -* Trim in-sync allocations set only when it grows {pull}21976[#21976] (issue: {issue}21719[#21719]) -* Allow master to assign primary shard to node that has shard store locked during shard state fetching {pull}21656[#21656] (issue: {issue}19416[#19416]) -* Keep a shadow replicas' allocation id when it is promoted to primary {pull}20863[#20863] (issue: {issue}20650[#20650]) -* IndicesClusterStateService should clean local started when re-assigns an initializing shard with the same aid {pull}20687[#20687] -* IndexRoutingTable.initializeEmpty shouldn't override supplied primary RecoverySource {pull}20638[#20638] (issue: {issue}20637[#20637]) -* Update incoming recoveries stats when shadow replica is reinitialized {pull}20612[#20612] -* `index.routing.allocation.initial_recovery` limits replica allocation {pull}20589[#20589] - -Analysis:: -* AsciiFoldingFilter's multi-term component should never preserve the original token. {pull}21982[#21982] -* Pre-built analysis factories do not implement MultiTermAware correctly. {pull}21981[#21981] -* Can load non-PreBuiltTokenFilter in Analyze API {pull}20396[#20396] -* Named analyzer should close the analyzer that it wraps {pull}20197[#20197] - -Bulk:: -* Reject empty IDs {pull}24118[#24118] (issue: {issue}24116[#24116]) - -CAT API:: -* Consume `full_id` request parameter early {pull}21270[#21270] (issue: {issue}21266[#21266]) - -CRUD:: -* Reject external versioning and explicit version numbers on create {pull}21998[#21998] -* MultiGet should not fail entirely if alias resolves to many indices {pull}20858[#20858] (issue: {issue}20845[#20845]) -* Fixed date math expression support in multi get requests. {pull}20659[#20659] (issue: {issue}17957[#17957]) - -Cache:: -* Invalidate cached query results if query timed out {pull}22807[#22807] (issue: {issue}22789[#22789]) -* Fix the request cache keys to not hold references to the SearchContext. 
{pull}21284[#21284] -* Prevent requests that use scripts or now() from being cached {pull}20750[#20750] (issue: {issue}20645[#20645]) - -Circuit Breakers:: -* ClusterState publishing shouldn't trigger circuit breakers {pull}20986[#20986] (issues: {issue}20827[#20827], {issue}20960[#20960]) - -Cluster:: -* Don't set local node on cluster state used for node join validation {pull}23311[#23311] (issues: {issue}21830[#21830], {issue}3[#3], {issue}4[#4], {issue}6[#6], {issue}9[#9]) -* Allow a cluster state applier to create an observer and wait for a better state {pull}23132[#23132] (issue: {issue}21817[#21817]) -* Cluster allocation explain to never return empty response body {pull}23054[#23054] -* IndicesService handles all exceptions during index deletion {pull}22433[#22433] -* Remove cluster update task when task times out {pull}21578[#21578] (issue: {issue}21568[#21568]) - -Core:: -* Check for default.path.data included in path.data {pull}24285[#24285] (issue: {issue}24283[#24283]) -* Improve performance of extracting warning value {pull}24114[#24114] (issue: {issue}24018[#24018]) -* Reject duplicate settings on the command line {pull}24053[#24053] -* Restrict build info loading to ES jar, not any jar {pull}24049[#24049] (issue: {issue}21955[#21955]) -* Streamline foreign stored context restore and allow to perserve response headers {pull}22677[#22677] (issue: {issue}22647[#22647]) -* Support negative numbers in readVLong {pull}22314[#22314] -* Add a StreamInput#readArraySize method that ensures sane array sizes {pull}21697[#21697] -* Use a buffer to do character to byte conversion in StreamOutput#writeString {pull}21680[#21680] (issue: {issue}21660[#21660]) -* Fix ShardInfo#toString {pull}21319[#21319] -* Protect BytesStreamOutput against overflows of the current number of written bytes. {pull}21174[#21174] (issue: {issue}21159[#21159]) -* Return target index name even if _rollover conditions are not met {pull}21138[#21138] -* .es_temp_file remains after system crash, causing it not to start again {pull}21007[#21007] (issue: {issue}20992[#20992]) -* StoreStatsCache should also ignore AccessDeniedException when checking file size {pull}20790[#20790] (issue: {issue}17580[#17580]) - -Dates:: -* Fix time zone rounding edge case for DST overlaps {pull}21550[#21550] (issue: {issue}20833[#20833]) - -Discovery:: -* ZenDiscovery - only validate min_master_nodes values if local node is master {pull}23915[#23915] (issue: {issue}23695[#23695]) -* Close InputStream when receiving cluster state in PublishClusterStateAction {pull}22711[#22711] -* Do not reply to pings from another cluster {pull}21894[#21894] (issue: {issue}21874[#21874]) -* Add current cluster state version to zen pings and use them in master election {pull}20384[#20384] (issue: {issue}20348[#20348]) - -Engine:: -* Close and flush refresh listeners on shard close {pull}22342[#22342] -* Die with dignity on the Lucene layer {pull}21721[#21721] (issue: {issue}19272[#19272]) -* Fix `InternalEngine#isThrottled` to not always return `false`. {pull}21592[#21592] -* Retrying replication requests on replica doesn't call `onRetry` {pull}21189[#21189] (issue: {issue}20211[#20211]) -* Take refresh IOExceptions into account when catching ACE in InternalEngine {pull}20546[#20546] (issue: {issue}19975[#19975]) - -Exceptions:: -* Stop returning "es." 
internal exception headers as http response headers {pull}22703[#22703] (issue: {issue}17593[#17593]) -* Fixing shard recovery error message to report the number of docs correctly for each node {pull}22515[#22515] (issue: {issue}21893[#21893]) - -Highlighting:: -* Fix FiltersFunctionScoreQuery highlighting {pull}21827[#21827] -* Fix highlighting on a stored keyword field {pull}21645[#21645] (issue: {issue}21636[#21636]) -* Fix highlighting of MultiTermQuery within a FunctionScoreQuery {pull}20400[#20400] (issue: {issue}20392[#20392]) - -Index APIs:: -* Fixes restore of a shrunken index when initial recovery node is gone {pull}24322[#24322] (issue: {issue}24257[#24257]) -* Honor update request timeout {pull}23825[#23825] -* Ensure shrunk indices carry over version information from their source {pull}22469[#22469] (issue: {issue}22373[#22373]) -* Validate the `_rollover` target index name early to also fail if dry_run=true {pull}21330[#21330] (issue: {issue}21149[#21149]) -* Only negate index expression on all indices with preceding wildcard {pull}20898[#20898] (issues: {issue}19800[#19800], {issue}20033[#20033]) -* Fix IndexNotFoundException in multi index search request. {pull}20188[#20188] (issue: {issue}3839[#3839]) - -Index Templates:: -* Fix integer overflows when dealing with templates. {pull}21628[#21628] (issue: {issue}21622[#21622]) - -Ingest:: -* Improve missing ingest processor error {pull}23379[#23379] (issue: {issue}23392[#23392]) -* update _ingest.timestamp to use new ZonedDateTime {pull}23174[#23174] (issue: {issue}23168[#23168]) -* fix date-processor to a new default year for every new pipeline execution {pull}22601[#22601] (issue: {issue}22547[#22547]) -* fix index out of bounds error in KV Processor {pull}22288[#22288] (issue: {issue}22272[#22272]) -* Fixes GrokProcessor's ignorance of named-captures with same name. {pull}22131[#22131] (issue: {issue}22117[#22117]) -* fix trace_match behavior for when there is only one grok pattern {pull}21413[#21413] (issue: {issue}21371[#21371]) -* Stored scripts and ingest node configurations should be included into a snapshot {pull}21227[#21227] (issue: {issue}21184[#21184]) -* make painless the default scripting language for ScriptProcessor {pull}20981[#20981] (issue: {issue}20943[#20943]) -* no null values in ingest configuration error messages {pull}20616[#20616] -* JSON Processor was not properly added {pull}20613[#20613] - -Inner Hits:: -* Replace NestedChildrenQuery with ParentChildrenBlockJoinQuery {pull}24016[#24016] (issue: {issue}24009[#24009]) -* Changed DisMaxQueryBuilder to extract inner hits from leaf queries {pull}23512[#23512] (issue: {issue}23482[#23482]) -* Inner hits and ignore unmapped {pull}21693[#21693] (issue: {issue}21620[#21620]) -* Skip adding a parent field to nested documents. 
{pull}21522[#21522] (issue: {issue}21503[#21503]) - -Internal:: -* Fix NPE if field caps request has a field that does not exist in all indices {pull}24504[#24504] -* Add infrastructure to mark contexts as system contexts {pull}23830[#23830] -* Always restore the ThreadContext for operations delayed due to a block {pull}23349[#23349] -* Index creation and setting update may not return deprecation logging {pull}22702[#22702] -* Rethrow ExecutionException from the loader to concurrent callers of Cache#computeIfAbsent {pull}21549[#21549] -* Restore thread's original context before returning to the ThreadPool {pull}21411[#21411] -* Fix NPE in SearchContext.toString() {pull}21069[#21069] -* Prevent AbstractArrays from releasing bytes more than once {pull}20819[#20819] -* Source filtering should treat dots in field names as sub objects. {pull}20736[#20736] (issue: {issue}20719[#20719]) -* IndicesAliasesRequest should not implement CompositeIndicesRequest {pull}20726[#20726] -* Ensure elasticsearch doesn't start with unsupported indices {pull}20514[#20514] (issue: {issue}20512[#20512]) - -Java API:: -* Don't output empty ext object in SearchSourceBuilder#toXContent {pull}22093[#22093] (issue: {issue}20969[#20969]) -* Transport client: Fix remove address to actually work {pull}21743[#21743] -* Add a HostFailureListener to notify client code if a node got disconnected {pull}21709[#21709] (issue: {issue}21424[#21424]) -* Fix InternalSearchHit#hasSource to return the proper boolean value {pull}21441[#21441] (issue: {issue}21419[#21419]) -* Null checked for source when calling sourceRef {pull}21431[#21431] (issue: {issue}19279[#19279]) -* ClusterAdminClient.prepareDeletePipeline method should accept pipeline id to delete {pull}21228[#21228] -* fix IndexResponse#toString to print out shards info {pull}20562[#20562] - -Java High Level REST Client:: -* Correctly parse BulkItemResponse.Failure's status {pull}23432[#23432] - -Java REST Client:: -* Make buffer limit configurable in HeapBufferedConsumerFactory {pull}23970[#23970] (issue: {issue}23958[#23958]) -* RestClient asynchronous execution should not throw exceptions {pull}23307[#23307] -* Don't use null charset in RequestLogger {pull}22197[#22197] (issue: {issue}22190[#22190]) -* Rest client: don't reuse the same HttpAsyncResponseConsumer across multiple retries {pull}21378[#21378] - -Logging:: -* Do not prematurely shutdown Log4j {pull}21519[#21519] (issue: {issue}21514[#21514]) -* Assert status logger does not warn on Log4j usage {pull}21339[#21339] -* Fix logger names for Netty {pull}21223[#21223] (issue: {issue}20457[#20457]) -* Fix logger when you can not create an azure storage client {pull}20670[#20670] (issues: {issue}20633[#20633], {issue}20669[#20669]) -* Avoid unnecessary creation of prefix loggers {pull}20571[#20571] (issue: {issue}20570[#20570]) -* Fix logging hierarchy configs {pull}20463[#20463] -* Fix prefix logging {pull}20429[#20429] - -Mapping:: -* Preserve response headers when creating an index {pull}23950[#23950] (issue: {issue}23947[#23947]) -* Improves disabled fielddata error message {pull}23841[#23841] (issue: {issue}22768[#22768]) -* Fix MapperService StackOverflowError {pull}23605[#23605] (issue: {issue}23604[#23604]) -* Fix NPE with scaled floats stats when field is not indexed {pull}23528[#23528] (issue: {issue}23487[#23487]) -* Range types causing `GetFieldMappingsIndexRequest` to fail due to `NullPointerException` in `RangeFieldMapper.doXContentBody` when `include_defaults=true` is on the query string {pull}22925[#22925] -* 
Disallow introducing illegal object mappings (double '..') {pull}22891[#22891] (issue: {issue}22794[#22794]) -* The `_all` default mapper is not completely configured. {pull}22236[#22236] -* Fix MapperService.allEnabled(). {pull}22227[#22227] -* Dynamic `date` fields should use the `format` that was used to detect it is a date. {pull}22174[#22174] (issue: {issue}9410[#9410]) -* Sub-fields should not accept `include_in_all` parameter {pull}21971[#21971] (issue: {issue}21710[#21710]) -* Mappings: Fix get mapping when no indexes exist to not fail in response generation {pull}21924[#21924] (issue: {issue}21916[#21916]) -* Fail to index fields with dots in field names when one of the intermediate objects is nested. {pull}21787[#21787] (issue: {issue}21726[#21726]) -* Uncommitted mapping updates should not affect existing indices {pull}21306[#21306] (issue: {issue}21189[#21189]) - -Nested Docs:: -* Fix bug in query builder rewrite that ignores the ignore_unmapped option {pull}22456[#22456] - -Network:: -* Respect promises on pipelined responses {pull}23317[#23317] (issues: {issue}23310[#23310], {issue}23322[#23322]) -* Ensure that releasing listener is called {pull}23310[#23310] -* Pass `forceExecution` flag to transport interceptor {pull}22739[#22739] -* Ensure new connections won't be opened if transport is closed or closing {pull}22589[#22589] (issue: {issue}22554[#22554]) -* Prevent open channel leaks if handshake times out or is interrupted {pull}22554[#22554] -* Execute low level handshake in #openConnection {pull}22440[#22440] -* Handle connection close / reset events gracefully during handshake {pull}22178[#22178] -* Do not lose host information when pinging {pull}21939[#21939] (issue: {issue}21828[#21828]) -* DiscoveryNode and TransportAddress should preserve host information {pull}21828[#21828] -* Die with dignity on the network layer {pull}21720[#21720] (issue: {issue}19272[#19272]) -* Fix connection close header handling {pull}20956[#20956] (issue: {issue}20938[#20938]) -* Ensure port range is readable in the exception message {pull}20893[#20893] -* Prevent double release in TcpTransport if send listener throws an exception {pull}20880[#20880] - -Packaging:: -* Fall back to non-atomic move when removing plugins {pull}23548[#23548] (issue: {issue}35[#35]) -* Another fix for handling of paths on Windows {pull}22132[#22132] (issue: {issue}21921[#21921]) -* Fix handling of spaces in Windows paths {pull}21921[#21921] (issues: {issue}20809[#20809], {issue}21525[#21525]) -* Add option to skip kernel parameters on install {pull}21899[#21899] (issue: {issue}21877[#21877]) -* Set vm.max_map_count on systemd package install {pull}21507[#21507] -* Export ES_JVM_OPTIONS for SysV init {pull}21445[#21445] (issue: {issue}21255[#21255]) -* Debian: configure start-stop-daemon to not go into background {pull}21343[#21343] (issues: {issue}12716[#12716], {issue}21300[#21300]) -* Generate POM files with non-wildcard excludes {pull}21234[#21234] (issue: {issue}21170[#21170]) -* [Packaging] Do not remove scripts directory on upgrade {pull}20452[#20452] -* [Package] Remove bin/lib/modules directories on RPM uninstall/upgrade {pull}20448[#20448] - -Parent/Child:: -* Add null check in case of orphan child document {pull}22772[#22772] (issue: {issue}22770[#22770]) - -Percolator:: -* Fix memory leak when percolator uses bitset or field data cache {pull}24115[#24115] (issue: {issue}24108[#24108]) -* Fix NPE in percolator's 'now' range check for percolator queries with range queries {pull}22356[#22356] (issue: 
{issue}22355[#22355]) - -Plugin Analysis Stempel:: -* Fix thread safety of Stempel's token filter factory {pull}22610[#22610] (issue: {issue}21911[#21911]) - -Plugin Discovery EC2:: -* Fix ec2 discovery when used with IAM profiles. {pull}21048[#21048] (issue: {issue}21039[#21039]) - -Plugin Ingest GeoIp:: -* [ingest-geoip] update geoip to not include null-valued results from {pull}20455[#20455] - -Plugin Lang Painless:: -* painless: Fix method references to ctor with the new LambdaBootstrap and cleanup code {pull}24406[#24406] -* Fix Painless Lambdas for Java 9 {pull}24070[#24070] (issue: {issue}23473[#23473]) -* Fix painless's regex lexer and error messages {pull}23634[#23634] -* Replace Painless's Cast with casting strategies {pull}23369[#23369] -* Fix Bad Casts In Painless {pull}23282[#23282] (issue: {issue}23238[#23238]) -* Don't allow casting from void to def in painless {pull}22969[#22969] (issue: {issue}22908[#22908]) -* Fix def invoked qualified method refs {pull}22918[#22918] -* Whitelist some ScriptDocValues in painless {pull}22600[#22600] (issue: {issue}22584[#22584]) -* Update Painless Loop Counter to be Higher {pull}22560[#22560] (issue: {issue}22508[#22508]) -* Fix some issues with painless's strings {pull}22393[#22393] (issue: {issue}22372[#22372]) -* Test fix for def equals in Painless {pull}21945[#21945] (issue: {issue}21801[#21801]) -* Fix a VerifyError bug in Painless {pull}21765[#21765] -* Fix Lambdas in Painless to be Able to Use Top-Level Variables Such as params and doc {pull}21635[#21635] (issues: {issue}20869[#20869], {issue}21479[#21479]) -* Fix String Concatenation Bug In Painless {pull}20623[#20623] - -Plugin Repository Azure:: -* Azure blob store's readBlob() method first checks if the blob exists {pull}23483[#23483] (issue: {issue}23480[#23480]) -* Fixes default chunk size for Azure repositories {pull}22577[#22577] (issue: {issue}22513[#22513]) -* readonly on azure repository must be taken into account {pull}22055[#22055] (issues: {issue}22007[#22007], {issue}22053[#22053]) - -Plugin Repository HDFS:: -* Fixing permission errors for `KERBEROS` security mode for HDFS Repository {pull}23439[#23439] (issue: {issue}22156[#22156]) - -Plugin Repository S3:: -* Handle BlobPath's trailing separator case. 
Add test cases to BlobPathTests.java {pull}23091[#23091] -* Fixes leading forward slash in S3 repository base_path {pull}20861[#20861] - -Plugins:: -* Fix delete of plugin directory on remove plugin {pull}24266[#24266] (issue: {issue}24252[#24252]) -* Use a marker file when removing a plugin {pull}24252[#24252] (issue: {issue}24231[#24231]) -* Remove hidden file leniency from plugin service {pull}23982[#23982] (issue: {issue}12465[#12465]) -* Add check for null pluginName in remove command {pull}22930[#22930] (issue: {issue}22922[#22922]) -* Use sysprop like with es.path.home to pass conf dir {pull}18870[#18870] (issue: {issue}18689[#18689]) - -Query DSL:: -* FuzzyQueryBuilder should error when parsing array of values {pull}23762[#23762] (issue: {issue}23759[#23759]) -* Fix parsing for `max_determinized_states` {pull}22749[#22749] (issue: {issue}22722[#22722]) -* Fix script score function that combines _score and weight {pull}22713[#22713] (issue: {issue}21483[#21483]) -* Fixes date range query using epoch with timezone {pull}21542[#21542] (issue: {issue}21501[#21501]) -* Allow overriding all-field leniency when `lenient` option is specified {pull}21504[#21504] (issues: {issue}20925[#20925], {issue}21341[#21341]) -* Max score should be updated when a rescorer is used {pull}20977[#20977] (issue: {issue}20651[#20651]) -* Fixes MultiMatchQuery so that it doesn't provide a null context {pull}20882[#20882] -* Fix silently accepting malformed queries {pull}20515[#20515] (issue: {issue}20500[#20500]) -* Fix match_phrase_prefix query with single term on _all field {pull}20471[#20471] (issue: {issue}20470[#20470]) - -REST:: -* [API] change wait_for_completion default according to docs {pull}23672[#23672] -* Deprecate request_cache for clear-cache {pull}23638[#23638] (issue: {issue}22748[#22748]) -* HTTP transport stashes the ThreadContext instead of the RestController {pull}23456[#23456] -* Fix date format in warning headers {pull}23418[#23418] (issue: {issue}23275[#23275]) -* Align REST specs for HEAD requests {pull}23313[#23313] (issue: {issue}21125[#21125]) -* Correct warning header to be compliant {pull}23275[#23275] (issue: {issue}22986[#22986]) -* Fix get HEAD requests {pull}23186[#23186] (issue: {issue}21125[#21125]) -* Handle bad HTTP requests {pull}23153[#23153] (issue: {issue}23034[#23034]) -* Fix get source HEAD requests {pull}23151[#23151] (issue: {issue}21125[#21125]) -* Properly encode location header {pull}23133[#23133] (issues: {issue}21057[#21057], {issue}23115[#23115]) -* Fix template HEAD requests {pull}23130[#23130] (issue: {issue}21125[#21125]) -* Fix index HEAD requests {pull}23112[#23112] (issue: {issue}21125[#21125]) -* Fix alias HEAD requests {pull}23094[#23094] (issue: {issue}21125[#21125]) -* Strict level parsing for indices stats {pull}21577[#21577] (issue: {issue}21024[#21024]) -* The routing query string param is supported by mget but was missing from the rest spec {pull}21357[#21357] -* fix thread_pool_patterns path variable definition {pull}21332[#21332] -* Read indices options in indices upgrade API {pull}21281[#21281] (issue: {issue}21099[#21099]) -* ensure the XContentBuilder is always closed in RestBuilderListener {pull}21124[#21124] -* Add correct Content-Length on HEAD requests {pull}21123[#21123] (issue: {issue}21077[#21077]) -* Make sure HEAD / has 0 Content-Length {pull}21077[#21077] (issue: {issue}21075[#21075]) -* Adds percent-encoding for Location headers {pull}21057[#21057] (issue: {issue}21016[#21016]) -* Whitelist node stats indices level parameter 
{pull}21024[#21024] (issue: {issue}20722[#20722]) -* Remove lenient URL parameter parsing {pull}20722[#20722] (issue: {issue}14719[#14719]) -* XContentBuilder: Avoid building self-referencing objects {pull}20550[#20550] (issues: {issue}19475[#19475], {issue}20540[#20540]) - -Recovery:: -* Provide target allocation id as part of start recovery request {pull}24333[#24333] (issue: {issue}24167[#24167]) -* Fix primary relocation for shadow replicas {pull}22474[#22474] (issue: {issue}20300[#20300]) -* Don't close store under CancellableThreads {pull}22434[#22434] (issue: {issue}22325[#22325]) -* Use a fresh recovery id when retrying recoveries {pull}22325[#22325] (issue: {issue}22043[#22043]) -* Allow flush/force_merge/upgrade on shard marked as relocated {pull}22078[#22078] (issue: {issue}22043[#22043]) -* Fix concurrency issues between cancelling a relocation and marking shard as relocated {pull}20443[#20443] - -Reindex API:: -* Fix throttled reindex_from_remote {pull}23953[#23953] (issues: {issue}23828[#23828], {issue}23945[#23945]) -* Fix reindex with a remote source on a version before 2.0.0 {pull}23805[#23805] -* Make reindex wait for cleanup before responding {pull}23677[#23677] (issue: {issue}23653[#23653]) -* Reindex: do not log when can't clear old scroll {pull}22942[#22942] (issue: {issue}22937[#22937]) -* Fix reindex-from-remote from <2.0 {pull}22931[#22931] (issue: {issue}22893[#22893]) -* Fix reindex from remote clearing scroll {pull}22525[#22525] (issue: {issue}22514[#22514]) -* Fix source filtering in reindex-from-remote {pull}22514[#22514] (issue: {issue}22507[#22507]) -* Remove content type detection from reindex-from-remote {pull}22504[#22504] (issue: {issue}22329[#22329]) -* Don't close rest client from its callback {pull}22061[#22061] (issue: {issue}22027[#22027]) -* Keep context during reindex's retries {pull}21941[#21941] -* Ignore IllegalArgumentException with assertVersionSerializable {pull}21409[#21409] (issues: {issue}20767[#20767], {issue}21350[#21350]) -* Bump reindex-from-remote's buffer to 200mb {pull}21222[#21222] (issue: {issue}21185[#21185]) -* Fix reindex-from-remote for parent/child from <2.0 {pull}21070[#21070] (issue: {issue}21044[#21044]) - -Scripting:: -* Convert script/template objects to json format internally {pull}23308[#23308] (issue: {issue}23245[#23245]) -* Script: Fix value of `ctx._now` to be current epoch time in milliseconds {pull}23175[#23175] (issue: {issue}23169[#23169]) -* Expose `ip` fields as strings in scripts. {pull}21997[#21997] (issue: {issue}21977[#21977]) -* Add support for booleans in scripts {pull}20950[#20950] (issue: {issue}20949[#20949]) -* Native scripts should be created once per index, not per segment. {pull}20609[#20609] - -Search:: -* Include all aliases including non-filtering in `_search_shards` response {pull}24489[#24489] -* Cross Cluster Search: propagate original indices per cluster {pull}24328[#24328] -* Query string default field {pull}24214[#24214] -* Speed up parsing of large `terms` queries. {pull}24210[#24210] -* IndicesQueryCache should delegate the scorerSupplier method. 
{pull}24209[#24209] -* Disable graph analysis at query time for shingle and cjk filters producing tokens of different size {pull}23920[#23920] (issue: {issue}23918[#23918]) -* Fix cross-cluster remote node gateway attributes {pull}23863[#23863] -* Use a fixed seed for computing term hashCode in TermsSliceQuery {pull}23795[#23795] -* Honor max concurrent searches in multi-search {pull}23538[#23538] (issue: {issue}23527[#23527]) -* Avoid stack overflow in multi-search {pull}23527[#23527] (issue: {issue}23523[#23523]) -* Fix query_string_query to transform "foo:*" in an exists query on the field name {pull}23433[#23433] (issue: {issue}23356[#23356]) -* Factor out filling of TopDocs in SearchPhaseController {pull}23380[#23380] (issues: {issue}19356[#19356], {issue}23357[#23357]) -* Replace blocking calls in ExpandCollapseSearchResponseListener by asynchronous requests {pull}23053[#23053] (issue: {issue}23048[#23048]) -* Ensure fixed serialization order of InnerHitBuilder {pull}22820[#22820] (issue: {issue}22808[#22808]) -* Improve concurrency of ShardCoreKeyMap. {pull}22316[#22316] -* Make `-0` compare less than `+0` consistently. {pull}22173[#22173] (issue: {issue}22167[#22167]) -* Fix boost_mode propagation when the function score query builder is rewritten {pull}22172[#22172] (issue: {issue}22138[#22138]) -* FiltersAggregationBuilder: rewriting filter queries, the same way as in FilterAggregationBuilder {pull}22076[#22076] -* Fix cross_fields type on multi_match query with synonyms {pull}21638[#21638] (issue: {issue}21633[#21633]) -* Fix match_phrase_prefix on boosted fields {pull}21623[#21623] (issue: {issue}21613[#21613]) -* Respect default search timeout {pull}21599[#21599] (issues: {issue}12211[#12211], {issue}21595[#21595]) -* Remove LateParsingQuery to prevent timestamp access after context is frozen {pull}21328[#21328] (issue: {issue}21295[#21295]) -* Make range queries round up upper bounds again. 
{pull}20582[#20582] (issues: {issue}20579[#20579], {issue}8889[#8889]) -* Throw error when trying to fetch fields from source and source is disabled {pull}20424[#20424] (issues: {issue}20093[#20093], {issue}20408[#20408]) - -Search Templates:: -* No longer add illegal content type option to stored search templates {pull}24251[#24251] (issue: {issue}24227[#24227]) -* SearchTemplateRequest to implement CompositeIndicesRequest {pull}21865[#21865] (issue: {issue}21747[#21747]) - -Settings:: -* Do not set path.data in environment if not set {pull}24132[#24132] (issue: {issue}24099[#24099]) -* Correct handling of default and array settings {pull}24074[#24074] (issues: {issue}23981[#23981], {issue}24052[#24052]) -* Fix merge scheduler config settings {pull}23391[#23391] -* Settings: Fix keystore cli prompting for yes/no to handle console returning null {pull}23320[#23320] -* Expose `search.highlight.term_vector_multi_value` as a node level setting {pull}22999[#22999] -* NPE when no setting name passed to elasticsearch-keystore {pull}22609[#22609] -* Handle spaces in `action.auto_create_index` gracefully {pull}21790[#21790] (issue: {issue}21449[#21449]) -* Fix settings diff generation for affix and group settings {pull}21788[#21788] -* Don't reset non-dynamic settings unless explicitly requested {pull}21646[#21646] (issue: {issue}21593[#21593]) -* Fix Setting.timeValue() method {pull}20696[#20696] (issue: {issue}20662[#20662]) -* Add a hard limit for `index.number_of_shards` {pull}20682[#20682] -* Include complex settings in settings requests {pull}20622[#20622] - -Snapshot/Restore:: -* Fixes maintaining the shards a snapshot is waiting on {pull}24289[#24289] -* Fixes snapshot status on failed snapshots {pull}23833[#23833] (issue: {issue}23716[#23716]) -* Fixes snapshot deletion handling on in-progress snapshot failure {pull}23703[#23703] (issue: {issue}23663[#23663]) -* Prioritize listing index-N blobs over index.latest in reading snapshots {pull}23333[#23333] -* Gracefully handles pre 2.x compressed snapshots {pull}22267[#22267] -* URLRepository should throw NoSuchFileException to correctly adhere to readBlob contract {pull}22069[#22069] (issue: {issue}22004[#22004]) -* Fixes shard level snapshot metadata loading when index-N file is missing {pull}21813[#21813] -* Ensures cleanup of temporary index-* generational blobs during snapshotting {pull}21469[#21469] (issue: {issue}21462[#21462]) -* Fixes get snapshot duplicates when asking for _all {pull}21340[#21340] (issue: {issue}21335[#21335]) - -Stats:: -* Avoid overflow when computing total FS stats {pull}23641[#23641] -* Handle existence of cgroup version 2 hierarchy {pull}23493[#23493] (issue: {issue}23486[#23486]) -* Handle long overflow when adding paths' totals {pull}23293[#23293] (issue: {issue}23093[#23093]) -* Fix control group pattern {pull}23219[#23219] (issue: {issue}23218[#23218]) -* Fix total disk bytes returning negative value {pull}23093[#23093] -* Implement stats for geo_point and geo_shape field {pull}22391[#22391] (issue: {issue}22384[#22384]) -* Use reader for doc stats {pull}22317[#22317] (issue: {issue}22285[#22285]) -* Avoid NPE in NodeService#stats if HTTP is disabled {pull}22060[#22060] (issue: {issue}22058[#22058]) -* Add support for "include_segment_file_sizes" in indices stats REST handler {pull}21879[#21879] (issue: {issue}21878[#21878]) -* Remove output_uuid parameter from cluster stats {pull}21020[#21020] (issue: {issue}20722[#20722]) -* Fix FieldStats deserialization of `ip` field {pull}20522[#20522] (issue: 
{issue}20516[#20516]) - -Task Manager:: -* Task Management: Make TaskInfo parsing forwards compatible {pull}24073[#24073] (issue: {issue}23250[#23250]) -* Fix hanging cancelling task with no children {pull}22796[#22796] -* Fix broken TaskInfo.toString() {pull}22698[#22698] (issue: {issue}22387[#22387]) -* Task cancellation command should wait for all child nodes to receive cancellation request before returning {pull}21397[#21397] (issue: {issue}21126[#21126]) - -Term Vectors:: -* Fix _termvectors with preference to not hit NPE {pull}21959[#21959] -* Return correct term statistics when a field is not found in a shard {pull}21922[#21922] (issue: {issue}21906[#21906]) - -Tribe Node:: -* Add socket permissions for tribe nodes {pull}21546[#21546] (issues: {issue}16392[#16392], {issue}21122[#21122]) - - - -[[regression-6.0.0-alpha1-5x]] -[float] -=== Regressions - -Bulk:: -* Fix _bulk response when it can't create an index {pull}24048[#24048] (issues: {issue}22488[#22488], {issue}24028[#24028]) - -Core:: -* Source filtering: only accept array items if the previous include pattern matches {pull}22593[#22593] (issue: {issue}22557[#22557]) - -Highlighting:: -* Handle SynonymQuery extraction for the FastVectorHighlighter {pull}20829[#20829] (issue: {issue}20781[#20781]) - -Logging:: -* Restores the original default format of search slow log {pull}21770[#21770] (issue: {issue}21711[#21711]) - -Network:: -* You had one job Netty logging guard {pull}24469[#24469] (issues: {issue}5624[#5624], {issue}6568[#6568]) - -Plugin Discovery EC2:: -* Fix ec2 discovery when used with IAM profiles. {pull}21042[#21042] (issue: {issue}21039[#21039]) - -Plugin Repository S3:: -* Fix s3 repository when used with IAM profiles {pull}21058[#21058] (issue: {issue}21048[#21048]) - -Plugins:: -* Plugins: Add back user agent when downloading plugins {pull}20872[#20872] - -Search:: -* Handle specialized term queries in MappedFieldType.extractTerm(Query) {pull}21889[#21889] (issue: {issue}21882[#21882]) - - - -[[upgrade-6.0.0-alpha1-5x]] -[float] -=== Upgrades - -Aggregations:: -* Upgrade HDRHistogram to 2.1.9 {pull}23254[#23254] (issue: {issue}23239[#23239]) - -Core:: -* Upgrade to Lucene 6.5.0 {pull}23750[#23750] -* Upgrade from JNA 4.2.2 to JNA 4.4.0 {pull}23636[#23636] -* Upgrade to lucene-6.5.0-snapshot-d00c5ca {pull}23385[#23385] -* Upgrade to lucene-6.5.0-snapshot-f919485. {pull}23087[#23087] -* Upgrade to Lucene 6.4.0 {pull}22724[#22724] -* Update Jackson to 2.8.6 {pull}22596[#22596] (issue: {issue}22266[#22266]) -* Upgrade to lucene-6.4.0-snapshot-084f7a0. {pull}22413[#22413] -* Upgrade to lucene-6.4.0-snapshot-ec38570 {pull}21853[#21853] -* Upgrade to lucene-6.3.0. {pull}21464[#21464] - -Dates:: -* Update Joda Time to version 2.9.5 {pull}21468[#21468] (issues: {issue}20911[#20911], {issue}332[#332], {issue}373[#373], {issue}378[#378], {issue}379[#379], {issue}386[#386], {issue}394[#394], {issue}396[#396], {issue}397[#397], {issue}404[#404], {issue}69[#69]) - -Internal:: -* Upgrade to Lucene 6.4.1. 
{pull}22978[#22978] - -Logging:: -* Upgrade to Log4j 2.8.2 {pull}23995[#23995] -* Upgrade Log4j 2 to version 2.7 {pull}20805[#20805] (issue: {issue}20304[#20304]) - -Network:: -* Upgrade Netty to 4.1.10.Final {pull}24414[#24414] -* Upgrade to Netty 4.1.9 {pull}23540[#23540] (issues: {issue}23172[#23172], {issue}6308[#6308], {issue}6374[#6374]) -* Upgrade to Netty 4.1.8 {pull}23055[#23055] -* Upgrade to Netty 4.1.7 {pull}22587[#22587] -* Upgrade to Netty 4.1.6 {pull}21051[#21051] - -Plugin Repository Azure:: -* Update to Azure Storage 5.0.0 {pull}23517[#23517] (issue: {issue}23448[#23448]) - diff --git a/docs/reference/release-notes/6.0.0-alpha1.asciidoc b/docs/reference/release-notes/6.0.0-alpha1.asciidoc deleted file mode 100644 index a2001af7a2edf..0000000000000 --- a/docs/reference/release-notes/6.0.0-alpha1.asciidoc +++ /dev/null @@ -1,312 +0,0 @@ -[[release-notes-6.0.0-alpha1]] -== 6.0.0-alpha1 Release Notes - -The changes listed below have been released for the first time in Elasticsearch 6.0.0-alpha1. Changes in this release which were first released in the 5.x series are listed in <>. - - -Also see <>. - -[[breaking-6.0.0-alpha1]] -[float] -=== Breaking changes - -Allocation:: -* Remove `cluster.routing.allocation.snapshot.relocation_enabled` setting {pull}20994[#20994] - -Analysis:: -* Removing query-string parameters in `_analyze` API {pull}20704[#20704] (issue: {issue}20246[#20246]) - -CAT API:: -* Write -1 on unbounded queue in cat thread pool {pull}21342[#21342] (issue: {issue}21187[#21187]) - -CRUD:: -* Disallow `VersionType.FORCE` for GetRequest {pull}21079[#21079] (issue: {issue}20995[#20995]) -* Disallow `VersionType.FORCE` versioning for 6.x indices {pull}20995[#20995] (issue: {issue}20377[#20377]) - -Cluster:: -* No longer allow cluster name in data path {pull}20433[#20433] (issue: {issue}20391[#20391]) - -Core:: -* Simplify file store {pull}24402[#24402] (issue: {issue}24390[#24390]) -* Make boolean conversion strict {pull}22200[#22200] -* Remove the `default` store type. {pull}21616[#21616] -* Remove store throttling. {pull}21573[#21573] - -Geo:: -* Remove deprecated geo search features {pull}22876[#22876] -* Reduce GeoDistance Insanity {pull}19846[#19846] - -Index APIs:: -* Open/Close index api to allow_no_indices by default {pull}24401[#24401] (issues: {issue}24031[#24031], {issue}24341[#24341]) -* Remove support for controversial `ignore_unavailable` and `allow_no_indices` from indices exists api {pull}20712[#20712] - -Index Templates:: -* Allows multiple patterns to be specified for index templates {pull}21009[#21009] (issue: {issue}20690[#20690]) - -Java API:: -* Enforce Content-Type requirement on the rest layer and remove deprecated methods {pull}23146[#23146] (issue: {issue}19388[#19388]) - -Mapping:: -* Enforce at most one type. 
{pull}24428[#24428] (issue: {issue}24317[#24317]) -* Disallow `include_in_all` for 6.0+ indices {pull}22970[#22970] (issue: {issue}22923[#22923]) -* Disable _all by default, disallow configuring _all on 6.0+ indices {pull}22144[#22144] (issues: {issue}19784[#19784], {issue}20925[#20925], {issue}21341[#21341]) -* Throw an exception on unrecognized "match_mapping_type" {pull}22090[#22090] (issue: {issue}17285[#17285]) - -Network:: -* Remove blocking TCP clients and servers {pull}22639[#22639] -* Remove `modules/transport_netty_3` in favor of `netty_4` {pull}21590[#21590] -* Remove LocalTransport in favor of MockTcpTransport {pull}20695[#20695] - -Packaging:: -* Remove customization of ES_USER and ES_GROUP {pull}23989[#23989] (issue: {issue}23848[#23848]) - -Percolator:: -* Remove deprecated percolate and mpercolate apis {pull}22331[#22331] - -Plugin Delete By Query:: -* Require explicit query in _delete_by_query API {pull}23632[#23632] (issue: {issue}23629[#23629]) - -Plugin Discovery EC2:: -* Ec2 Discovery: Cleanup deprecated settings {pull}24150[#24150] -* Discovery EC2: Remove region setting {pull}23991[#23991] (issue: {issue}22758[#22758]) -* AWS Plugins: Remove signer type setting {pull}23984[#23984] (issue: {issue}22599[#22599]) - -Plugin Lang JS:: -* Remove lang-python and lang-javascript {pull}20734[#20734] (issue: {issue}20698[#20698]) - -Plugin Mapper Attachment:: -* Remove mapper attachments plugin {pull}20416[#20416] (issue: {issue}18837[#18837]) - -Plugin Repository Azure:: -* Remove global `repositories.azure` settings {pull}23262[#23262] (issues: {issue}22800[#22800], {issue}22856[#22856]) -* Remove auto creation of container for azure repository {pull}22858[#22858] (issue: {issue}22857[#22857]) - -Plugin Repository S3:: -* S3 Repository: Cleanup deprecated settings {pull}24097[#24097] -* S3 Repository: Remove region setting {pull}22853[#22853] (issue: {issue}22758[#22758]) -* S3 Repository: Remove bucket auto create {pull}22846[#22846] (issue: {issue}22761[#22761]) -* S3 Repository: Remove env var and sysprop credentials support {pull}22842[#22842] - -Query DSL:: -* Remove deprecated `minimum_number_should_match` in BoolQueryBuilder {pull}22416[#22416] -* Remove support for empty queries {pull}22092[#22092] (issue: {issue}17624[#17624]) -* Remove deprecated query names: in, geo_bbox, mlt, fuzzy_match and match_fuzzy {pull}21852[#21852] -* The `terms` query should always map to a Lucene `TermsQuery`. 
{pull}21786[#21786] -* Be strict when parsing values searching for booleans {pull}21555[#21555] (issue: {issue}21545[#21545]) -* Remove collect payloads parameter {pull}20385[#20385] - -REST:: -* Remove ldjson support and document ndjson for bulk/msearch {pull}23049[#23049] (issue: {issue}23025[#23025]) -* Enable strict duplicate checks for all XContent types {pull}22225[#22225] (issues: {issue}19614[#19614], {issue}22073[#22073]) -* Enable strict duplicate checks for JSON content {pull}22073[#22073] (issue: {issue}19614[#19614]) -* Remove lenient stats parsing {pull}21417[#21417] (issues: {issue}20722[#20722], {issue}21410[#21410]) -* Remove allow unquoted JSON {pull}20388[#20388] (issues: {issue}17674[#17674], {issue}17801[#17801]) -* Remove FORCE version_type {pull}20377[#20377] (issue: {issue}19769[#19769]) - -Scripting:: -* Make dates be ReadableDateTimes in scripts {pull}22948[#22948] (issue: {issue}22875[#22875]) -* Remove groovy scripting language {pull}21607[#21607] - -Search:: -* ProfileResult and CollectorResult should print machine readable timing information {pull}22561[#22561] -* Remove indices query {pull}21837[#21837] (issue: {issue}17710[#17710]) -* Remove ignored type parameter in search_shards api {pull}21688[#21688] - -Sequence IDs:: -* Change certain replica failures not to fail the replica shard {pull}22874[#22874] (issue: {issue}10708[#10708]) - -Shadow Replicas:: -* Remove shadow replicas {pull}23906[#23906] (issue: {issue}22024[#22024]) - - - -[[breaking-java-6.0.0-alpha1]] -[float] -=== Breaking Java changes - -Java API:: -* Java api: ActionRequestBuilder#execute to return a PlainActionFuture {pull}24415[#24415] (issues: {issue}24412[#24412], {issue}9201[#9201]) - -Network:: -* Simplify TransportAddress {pull}20798[#20798] - - - -[[deprecation-6.0.0-alpha1]] -[float] -=== Deprecations - -Index Templates:: -* Restore deprecation warning for invalid match_mapping_type values {pull}22304[#22304] - -Internal:: -* Deprecate XContentType auto detection methods in XContentFactory {pull}22181[#22181] (issue: {issue}19388[#19388]) - - - -[[feature-6.0.0-alpha1]] -[float] -=== New features - -Core:: -* Enable index-time sorting {pull}24055[#24055] (issue: {issue}6720[#6720]) - - - -[[enhancement-6.0.0-alpha1]] -[float] -=== Enhancements - -Aggregations:: -* Agg builder accessibility fixes {pull}24323[#24323] -* Remove support for the include/pattern syntax. 
{pull}23141[#23141] (issue: {issue}22933[#22933]) -* Promote longs to doubles when a terms agg mixes decimal and non-decimal numbers {pull}22449[#22449] (issue: {issue}22232[#22232]) - -Analysis:: -* Match- and MultiMatchQueryBuilder should only allow setting analyzer on string values {pull}23684[#23684] (issue: {issue}21665[#21665]) - -Bulk:: -* Simplify bulk request execution {pull}20109[#20109] - -CRUD:: -* Added validation for upsert request {pull}24282[#24282] (issue: {issue}16671[#16671]) - -Cluster:: -* Separate publishing from applying cluster states {pull}24236[#24236] -* Adds cluster state size to /_cluster/state response {pull}23440[#23440] (issue: {issue}3415[#3415]) - -Core:: -* Remove connect SocketPermissions from core {pull}22797[#22797] -* Add repository-url module and move URLRepository {pull}22752[#22752] (issue: {issue}22116[#22116]) -* Remove accept SocketPermissions from core {pull}22622[#22622] (issue: {issue}22116[#22116]) -* Move IfConfig.logIfNecessary call into bootstrap {pull}22455[#22455] (issue: {issue}22116[#22116]) -* Remove artificial default processors limit {pull}20874[#20874] (issue: {issue}20828[#20828]) -* Simplify write failure handling {pull}19105[#19105] (issue: {issue}20109[#20109]) - -Engine:: -* Fill missing sequence IDs up to max sequence ID when recovering from store {pull}24238[#24238] (issue: {issue}10708[#10708]) -* Use sequence numbers to identify out of order delivery in replicas & recovery {pull}24060[#24060] (issue: {issue}10708[#10708]) -* Add replica ops with version conflict to translog {pull}22626[#22626] -* Clarify global checkpoint recovery {pull}21934[#21934] (issue: {issue}21254[#21254]) - -Internal:: -* Try to convince the JVM not to lose stacktraces {pull}24426[#24426] (issue: {issue}24376[#24376]) -* Make document write requests immutable {pull}23038[#23038] - -Java High Level REST Client:: -* Add info method to High Level Rest client {pull}23350[#23350] -* Add support for named xcontent parsers to high level REST client {pull}23328[#23328] -* Add BulkRequest support to High Level Rest client {pull}23312[#23312] -* Add UpdateRequest support to High Level Rest client {pull}23266[#23266] -* Add delete API to the High Level Rest Client {pull}23187[#23187] -* Add Index API to High Level Rest Client {pull}23040[#23040] -* Add get/exists method to RestHighLevelClient {pull}22706[#22706] -* Add fromxcontent methods to delete response {pull}22680[#22680] (issue: {issue}22229[#22229]) -* Add REST high level client gradle submodule and first simple method {pull}22371[#22371] - -Java REST Client:: -* Wrap rest httpclient with doPrivileged blocks {pull}22603[#22603] (issue: {issue}22116[#22116]) - -Mapping:: -* Date detection should not rely on a hardcoded set of characters. 
{pull}22171[#22171] (issue: {issue}1694[#1694]) - -Network:: -* Isolate SocketPermissions to Netty {pull}23057[#23057] -* Wrap netty accept/connect ops with doPrivileged {pull}22572[#22572] (issue: {issue}22116[#22116]) -* Replace Socket, ServerSocket, and HttpServer usages in tests with mocksocket versions {pull}22287[#22287] (issue: {issue}22116[#22116]) - -Plugin Discovery EC2:: -* Read ec2 discovery address from aws instance tags {pull}22743[#22743] (issue: {issue}22566[#22566]) - -Plugin Repository HDFS:: -* Add doPrivilege blocks for socket connect ops in repository-hdfs {pull}22793[#22793] (issue: {issue}22116[#22116]) - -Plugins:: -* Add doPrivilege blocks for socket connect operations in plugins {pull}22534[#22534] (issue: {issue}22116[#22116]) - -Recovery:: -* Peer Recovery: remove maxUnsafeAutoIdTimestamp hand off {pull}24243[#24243] (issue: {issue}24149[#24149]) -* Introduce sequence-number-based recovery {pull}22484[#22484] (issue: {issue}10708[#10708]) - -Search:: -* Add parsing from xContent to Suggest {pull}22903[#22903] -* Add parsing from xContent to ShardSearchFailure {pull}22699[#22699] - -Sequence IDs:: -* Block global checkpoint advances when recovering {pull}24404[#24404] (issue: {issue}10708[#10708]) -* Add primary term to doc write response {pull}24171[#24171] (issue: {issue}10708[#10708]) -* Preserve multiple translog generations {pull}24015[#24015] (issue: {issue}10708[#10708]) -* Introduce translog generation rolling {pull}23606[#23606] (issue: {issue}10708[#10708]) -* Replicate write failures {pull}23314[#23314] -* Introduce sequence-number-aware translog {pull}22822[#22822] (issue: {issue}10708[#10708]) -* Introduce translog no-op {pull}22291[#22291] (issue: {issue}10708[#10708]) -* Tighten sequence numbers recovery {pull}22212[#22212] (issue: {issue}10708[#10708]) -* Add BWC layer to seq no infra and enable BWC tests {pull}22185[#22185] (issue: {issue}21670[#21670]) -* Add internal _primary_term doc values field, fix _seq_no indexing {pull}21637[#21637] (issues: {issue}10708[#10708], {issue}21480[#21480]) -* Add global checkpoint to translog checkpoints {pull}21254[#21254] -* Sequence numbers commit data for Lucene uses Iterable interface {pull}20793[#20793] (issue: {issue}10708[#10708]) -* Simplify GlobalCheckpointService and properly hook it for cluster state updates {pull}20720[#20720] - -Stats:: -* Expose disk usage estimates in nodes stats {pull}22081[#22081] (issue: {issue}8686[#8686]) - -Store:: -* Remove support for lucene versions without checksums {pull}24021[#24021] - -Suggesters:: -* Remove deprecated _suggest endpoint {pull}22203[#22203] (issue: {issue}20305[#20305]) - -Task Manager:: -* Add descriptions to bulk tasks {pull}22059[#22059] (issue: {issue}21768[#21768]) - - - -[[bug-6.0.0-alpha1]] -[float] -=== Bug fixes - -Ingest:: -* Remove support for Visio and potm files {pull}22079[#22079] (issue: {issue}22077[#22077]) - -Inner Hits:: -* If size / offset are out of bounds just do a plain count {pull}20556[#20556] (issue: {issue}20501[#20501]) - -Internal:: -* Fix handling of document failure exception in InternalEngine {pull}22718[#22718] - -Plugin Ingest Attachment:: -* Add missing mime4j library {pull}22764[#22764] (issue: {issue}22077[#22077]) - -Plugin Repository S3:: -* Wrap getCredentials() in a doPrivileged() block {pull}23297[#23297] (issues: {issue}22534[#22534], {issue}23271[#23271]) - -Sequence IDs:: -* Avoid losing ops in file-based recovery {pull}22945[#22945] (issue: {issue}22484[#22484]) - -Snapshot/Restore:: -* Keep snapshot 
restore state and routing table in sync {pull}20836[#20836] (issue: {issue}19774[#19774]) - -Translog:: -* Fix Translog.Delete serialization for sequence numbers {pull}22543[#22543] - - - -[[regression-6.0.0-alpha1]] -[float] -=== Regressions - -Bulk:: -* Only re-parse operation if a mapping update was needed {pull}23832[#23832] (issue: {issue}23665[#23665]) - - - -[[upgrade-6.0.0-alpha1]] -[float] -=== Upgrades - -Core:: -* Upgrade to a Lucene 7 snapshot {pull}24089[#24089] (issues: {issue}23966[#23966], {issue}24086[#24086], {issue}24087[#24087], {issue}24088[#24088]) - -Plugin Ingest Attachment:: -* Update to Tika 1.14 {pull}21591[#21591] (issue: {issue}20390[#20390]) - diff --git a/docs/reference/release-notes/6.0.0-alpha2.asciidoc b/docs/reference/release-notes/6.0.0-alpha2.asciidoc deleted file mode 100644 index c89ddf9cb37bc..0000000000000 --- a/docs/reference/release-notes/6.0.0-alpha2.asciidoc +++ /dev/null @@ -1,180 +0,0 @@ -[[release-notes-6.0.0-alpha2]] -== 6.0.0-alpha2 Release Notes - -Also see <>. - -[[breaking-6.0.0-alpha2]] -[float] -=== Breaking changes - -CRUD:: -* Deleting a document from a non-existing index no longer creates the index: if the index does not exist, delete document will not auto create it {pull}24518[#24518] (issue: {issue}15425[#15425]) - -Plugin Analysis ICU:: -* Upgrade icu4j to latest version {pull}24821[#24821] - -Plugin Repository S3:: -* Remove deprecated S3 settings {pull}24445[#24445] - -Scripting:: -* Remove script access to term statistics {pull}19462[#19462] (issue: {issue}19359[#19359]) - - - -[[breaking-java-6.0.0-alpha2]] -[float] -=== Breaking Java changes - -Aggregations:: -* Make Terms.Bucket an interface rather than an abstract class {pull}24492[#24492] -* Compound order for histogram aggregations {pull}22343[#22343] (issues: {issue}14771[#14771], {issue}20003[#20003], {issue}23613[#23613]) - -Plugins:: -* Drop name from TokenizerFactory {pull}24869[#24869] - - - -[[deprecation-6.0.0-alpha2]] -[float] -=== Deprecations - -Settings:: -* Deprecate settings in .yml and .json {pull}24059[#24059] (issue: {issue}19391[#19391]) - - - -[[feature-6.0.0-alpha2]] -[float] -=== New features - -Aggregations:: -* SignificantText aggregation - like significant_terms, but for text {pull}24432[#24432] (issue: {issue}23674[#23674]) - -Internal:: -* Automatically adjust search threadpool queue_size {pull}23884[#23884] (issue: {issue}3890[#3890]) - -Mapping:: -* Add new ip_range field type {pull}24433[#24433] - -Plugin Analysis ICU:: -* Add ICUCollationFieldMapper {pull}24126[#24126] - - - -[[enhancement-6.0.0-alpha2]] -[float] -=== Enhancements - -Core:: -* Improve bootstrap checks error messages {pull}24548[#24548] - -Engine:: -* Move the IndexDeletionPolicy to be engine internal {pull}24930[#24930] (issue: {issue}10708[#10708]) - -Internal:: -* Add assertions enabled helper {pull}24834[#24834] - -Java High Level REST Client:: -* Add doc_count to ParsedMatrixStats {pull}24952[#24952] (issue: {issue}24776[#24776]) -* Add fromXContent method to ClearScrollResponse {pull}24909[#24909] -* ClearScrollRequest to implement ToXContentObject {pull}24907[#24907] -* SearchScrollRequest to implement ToXContentObject {pull}24906[#24906] (issue: {issue}3889[#3889]) -* Add aggs parsers for high level REST Client {pull}24824[#24824] (issues: {issue}23965[#23965], {issue}23973[#23973], {issue}23974[#23974], {issue}24085[#24085], {issue}24160[#24160], {issue}24162[#24162], {issue}24182[#24182], {issue}24183[#24183], {issue}24208[#24208], {issue}24213[#24213], {issue}24239[#24239], 
{issue}24284[#24284], {issue}24312[#24312], {issue}24330[#24330], {issue}24365[#24365], {issue}24371[#24371], {issue}24442[#24442], {issue}24521[#24521], {issue}24524[#24524], {issue}24564[#24564], {issue}24583[#24583], {issue}24589[#24589], {issue}24648[#24648], {issue}24667[#24667], {issue}24675[#24675], {issue}24682[#24682], {issue}24700[#24700], {issue}24706[#24706], {issue}24717[#24717], {issue}24720[#24720], {issue}24738[#24738], {issue}24746[#24746], {issue}24789[#24789], {issue}24791[#24791], {issue}24794[#24794], {issue}24796[#24796], {issue}24822[#24822]) - -Mapping:: -* Identify documents by their `_id`. {pull}24460[#24460] - -Packaging:: -* Set number of processes in systemd unit file {pull}24970[#24970] (issue: {issue}20874[#20874]) - -Plugin Lang Painless:: -* Make Painless Compiler Use an Instance Per Context {pull}24972[#24972] -* Make PainlessScript An Interface {pull}24966[#24966] - -Recovery:: -* Introduce primary context {pull}25031[#25031] (issue: {issue}10708[#10708]) - -Scripting:: -* Add StatefulFactoryType as optional intermediate factory in script contexts {pull}24974[#24974] (issue: {issue}20426[#20426]) -* Make contexts available to ScriptEngine construction {pull}24896[#24896] -* Make ScriptEngine.compile generic on the script context {pull}24873[#24873] -* Add instance and compiled classes to script contexts {pull}24868[#24868] - -Search:: -* Eliminate array access in tight loops when profiling is enabled. {pull}24959[#24959] -* Support Multiple Inner Hits on a Field Collapse Request {pull}24517[#24517] -* Expand cross cluster search indices for search requests to the concrete index or to its aliases {pull}24502[#24502] - -Search Templates:: -* Add max concurrent searches to multi template search {pull}24255[#24255] (issues: {issue}20912[#20912], {issue}21907[#21907]) - -Sequence IDs:: -* Fill gaps on primary promotion {pull}24945[#24945] (issue: {issue}10708[#10708]) -* Introduce clean transition on primary promotion {pull}24925[#24925] (issue: {issue}10708[#10708]) -* Guarantee that translog generations are seqNo conflict free {pull}24825[#24825] (issues: {issue}10708[#10708], {issue}24779[#24779]) -* Inline global checkpoints {pull}24513[#24513] (issue: {issue}10708[#10708]) - -Snapshot/Restore:: -* Enhances get snapshots API to allow retrieving repository index only {pull}24477[#24477] (issue: {issue}24288[#24288]) - - - -[[bug-6.0.0-alpha2]] -[float] -=== Bug fixes - -Aggregations:: -* Terms aggregation should remap global ordinal buckets when a sub-aggregator is used to sort the terms {pull}24941[#24941] (issue: {issue}24788[#24788]) -* Correctly set doc_count when MovAvg "predicts" values on existing buckets {pull}24892[#24892] (issue: {issue}24327[#24327]) -* DateHistogram: Fix `extended_bounds` with `offset` {pull}23789[#23789] (issue: {issue}23776[#23776]) -* Fix ArrayIndexOutOfBoundsException when no ranges are specified in the query {pull}23241[#23241] (issue: {issue}22881[#22881]) - -Analysis:: -* PatternAnalyzer should lowercase wildcard queries when `lowercase` is true. 
{pull}24967[#24967] - -Cache:: -* fix bug of weight computation {pull}24856[#24856] - -Core:: -* Fix cache expire after access {pull}24546[#24546] - -Index APIs:: -* Validates updated settings on closed indices {pull}24487[#24487] (issue: {issue}23787[#23787]) - -Ingest:: -* Fix floating-point error when DateProcessor parses UNIX {pull}24947[#24947] -* add option for _ingest.timestamp to use new ZonedDateTime (5.x backport) {pull}24030[#24030] (issues: {issue}23168[#23168], {issue}23174[#23174]) - -Inner Hits:: -* Fix Source filtering in new field collapsing feature {pull}24068[#24068] (issue: {issue}24063[#24063]) - -Internal:: -* Ensure remote cluster is connected before fetching `_field_caps` {pull}24845[#24845] (issue: {issue}24763[#24763]) - -Network:: -* Fix error message if an incompatible node connects {pull}24884[#24884] - -Plugins:: -* Fix plugin installation permissions {pull}24527[#24527] (issue: {issue}24480[#24480]) - -Scroll:: -* Fix single shard scroll within a cluster with nodes in version `>= 5.3` and `<= 5.3` {pull}24512[#24512] - -Search:: -* Fix script field sort returning Double.MAX_VALUE for all documents {pull}24942[#24942] (issue: {issue}24940[#24940]) -* Compute the took time of the query after the expand phase of field collapsing {pull}24902[#24902] (issue: {issue}24900[#24900]) - -Sequence IDs:: -* Handle primary failure handling replica response {pull}24926[#24926] (issue: {issue}24935[#24935]) - -Snapshot/Restore:: -* Fix inefficient (worst case exponential) loading of snapshot repository {pull}24510[#24510] (issue: {issue}24509[#24509]) - -Stats:: -* Avoid double decrement on current query counter {pull}24922[#24922] (issues: {issue}22996[#22996], {issue}24872[#24872]) -* Adjust available and free bytes to be non-negative on huge FSes {pull}24911[#24911] (issues: {issue}23093[#23093], {issue}24453[#24453]) - -Suggesters:: -* Fix context suggester to read values from keyword type field {pull}24200[#24200] (issue: {issue}24129[#24129]) - - diff --git a/docs/reference/release-notes/6.0.0-beta1.asciidoc b/docs/reference/release-notes/6.0.0-beta1.asciidoc deleted file mode 100644 index 3064dbb47b786..0000000000000 --- a/docs/reference/release-notes/6.0.0-beta1.asciidoc +++ /dev/null @@ -1,576 +0,0 @@ -[[release-notes-6.0.0-beta1]] -== 6.0.0-beta1 Release Notes - -Also see <>. 
- -[[breaking-6.0.0-beta1]] -[float] -=== Breaking changes - -Aggregations:: -* Change parsing of numeric `to` and `from` parameters in `date_range` aggregation {pull}25376[#25376] (issue: {issue}17920[#17920]) - -Aliases:: -* Wrong behavior deleting alias {pull}23997[#23997] (issues: {issue}10106[#10106], {issue}23960[#23960]) - -Highlighting:: -* Remove the postings highlighter and make unified the default highlighter choice {pull}25028[#25028] - -Index APIs:: -* Remove (deprecated) support for '+' in index expressions {pull}25274[#25274] (issue: {issue}24515[#24515]) -* Delete index API to work only against concrete indices {pull}25268[#25268] (issues: {issue}2318[#2318], {issue}23997[#23997]) - -Indexed Scripts/Templates:: -* Scripting: Remove search template actions {pull}25717[#25717] - -Ingest:: -* update ingest-user-agent regexes.yml {pull}25608[#25608] -* remove ingest.new_date_format {pull}25583[#25583] - -Java REST Client:: -* Remove deprecated created and found from index, delete and bulk {pull}25516[#25516] (issues: {issue}19566[#19566], {issue}19630[#19630], {issue}19633[#19633]) - -Packaging:: -* Remove support for ES_INCLUDE {pull}25804[#25804] -* Setup: Change default heap to 1G {pull}25695[#25695] -* Use config directory to find jvm.options {pull}25679[#25679] (issue: {issue}23004[#23004]) -* Remove implicit 32-bit support {pull}25435[#25435] -* Remove default path settings {pull}25408[#25408] (issue: {issue}25357[#25357]) -* Remove path.conf setting {pull}25392[#25392] (issue: {issue}25357[#25357]) -* Honor masking of systemd-sysctl.service {pull}24234[#24234] (issues: {issue}21899[#21899], {issue}806[#806]) - -Plugin Analysis ICU:: -* Upgrade icu4j for the ICU analysis plugin to 59.1 {pull}25243[#25243] (issue: {issue}21425[#21425]) - -Plugin Discovery Azure Classic:: -* Remove `discovery.type` BWC layer from the EC2/Azure/GCE plugins {pull}25080[#25080] (issue: {issue}24543[#24543]) - -Plugin Repository GCS:: -* GCS Repository: Remove specifying credential file on disk {pull}24727[#24727] - -Plugins:: -* Make plugin loading stricter {pull}25405[#25405] - -Query DSL:: -* Refactor QueryStringQuery for 6.0 {pull}25646[#25646] (issue: {issue}25574[#25574]) -* Change `split_on_whitespace` default to false {pull}25570[#25570] (issue: {issue}25470[#25470]) -* Remove deprecated template query {pull}24577[#24577] (issue: {issue}19390[#19390]) - -REST:: -* IndexClosedException to return 400 rather than 403 {pull}25752[#25752] -* Remove comma-separated feature parsing for GetIndicesAction {pull}24723[#24723] (issue: {issue}24437[#24437]) -* Improve REST error handling when endpoint does not support HTTP verb, add OPTIONS support {pull}24437[#24437] (issues: {issue}0[#0], {issue}15335[#15335], {issue}17916[#17916]) - -Scripting:: -* remove lang url parameter from stored script requests {pull}25779[#25779] (issue: {issue}22887[#22887]) -* Disallow lang to be used with Stored Scripts {pull}25610[#25610] -* Remove Deprecated Script Settings {pull}24756[#24756] (issue: {issue}24532[#24532]) -* Scripting: Remove native scripts {pull}24726[#24726] (issue: {issue}19966[#19966]) -* Scripting: Remove file scripts {pull}24627[#24627] (issue: {issue}21798[#21798]) - -Search:: -* Make `index` in TermsLookup mandatory {pull}25753[#25753] (issue: {issue}25750[#25750]) -* Removes FieldStats API {pull}25628[#25628] (issue: {issue}25577[#25577]) -* Remove deprecated fielddata_fields from search request {pull}25566[#25566] (issue: {issue}25537[#25537]) -* Removes deprecated fielddata_fields 
{pull}25537[#25537] (issue: {issue}19027[#19027])
-
-Settings::
-* Settings: Remove shared setting property {pull}24728[#24728]
-* Settings: Remove support for yaml and json config files {pull}24664[#24664] (issue: {issue}19391[#19391])
-
-Similarities::
-* Similarity should accept dynamic settings when possible {pull}20339[#20339] (issue: {issue}6727[#6727])
-
-
-
-[[breaking-java-6.0.0-beta1]]
-[float]
-=== Breaking Java changes
-
-Aggregations::
-* Remove the unused SignificantTerms.compareTerm() method {pull}24714[#24714]
-* Make SignificantTerms.Bucket an interface rather than an abstract class {pull}24670[#24670] (issue: {issue}24492[#24492])
-
-Internal::
-* Collapses package structure for some bucket aggs {pull}25579[#25579] (issue: {issue}22868[#22868])
-
-Java API::
-* Remove deprecated IdsQueryBuilder ctor {pull}25529[#25529]
-* Removing unneeded getTookInMillis method {pull}23923[#23923]
-
-Java High Level REST Client::
-* Unify the result interfaces from get and search in Java client {pull}25361[#25361] (issue: {issue}16440[#16440])
-* Allow RestHighLevelClient to use plugins {pull}25024[#25024]
-
-Java REST Client::
-* Rename client artifacts {pull}25693[#25693] (issue: {issue}20248[#20248])
-
-Plugin Delete By Query::
-* Move DeleteByQuery and Reindex requests into core {pull}24578[#24578]
-
-Query DSL::
-* Remove QueryParseContext {pull}25486[#25486]
-* Remove QueryParseContext from parsing QueryBuilders {pull}25448[#25448]
-
-REST::
-* Return index name and empty map for /`{index}`/_alias with no aliases {pull}25114[#25114] (issues: {issue}24723[#24723], {issue}25090[#25090])
-
-
-
-[[deprecation-6.0.0-beta1]]
-[float]
-=== Deprecations
-
-Index APIs::
-* Deprecated use of + in index expressions {pull}24585[#24585] (issue: {issue}24515[#24515])
-
-Indexed Scripts/Templates::
-* Scripting: Deprecate stored search template apis {pull}25437[#25437] (issue: {issue}24596[#24596])
-
-Percolator::
-* Deprecate percolate query's document_type parameter. {pull}25199[#25199]
-
-Scripting::
-* Scripting: Change keys for inline/stored scripts to source/id {pull}25127[#25127]
-* Scripting: Deprecate native scripts {pull}24692[#24692] (issue: {issue}19966[#19966])
-* Scripting: Deprecate index lookup {pull}24691[#24691] (issue: {issue}19359[#19359])
-* Deprecate Fine Grain Settings for Scripts {pull}24573[#24573] (issue: {issue}24532[#24532])
-* Scripting: Deprecate file script settings {pull}24555[#24555] (issue: {issue}21798[#21798])
-* Scripting: Deprecate file scripts {pull}24552[#24552] (issue: {issue}21798[#21798])
-
-Settings::
-* Settings: Update settings deprecation from yml to yaml {pull}24663[#24663] (issue: {issue}19391[#19391])
-
-Tribe Node::
-* Deprecate tribe service {pull}24598[#24598] (issue: {issue}24581[#24581])
-
-
-
-[[feature-6.0.0-beta1]]
-[float]
-=== New features
-
-Analysis::
-* Expose simplepattern and simplepatternsplit tokenizers {pull}25159[#25159] (issue: {issue}23363[#23363])
-* Parse synonyms with the same analysis chain {pull}8049[#8049] (issue: {issue}7199[#7199])
-
-Parent/Child::
-* Move parent_id query to the parent-join module {pull}25072[#25072] (issue: {issue}20257[#20257])
-* Introduce ParentJoinFieldMapper, a field mapper that creates parent/child relation within documents of the same index {pull}24978[#24978] (issue: {issue}20257[#20257])
-
-Search::
-* Automatically early terminate search query based on index sorting {pull}24864[#24864] (issue: {issue}6720[#6720])
-
-Sequence IDs::
-* Add a scheduled translog retention check {pull}25622[#25622] (issues: {issue}10708[#10708], {issue}25294[#25294])
-* Initialize sequence numbers on a shrunken index {pull}25321[#25321] (issue: {issue}10708[#10708])
-* Initialize primary term for shrunk indices {pull}25307[#25307] (issue: {issue}10708[#10708])
-* Introduce translog size and age based retention policies {pull}25147[#25147] (issue: {issue}10708[#10708])
-
-Stats::
-* Adds nodes usage API to monitor usages of actions {pull}24169[#24169]
-
-Task Manager::
-* Task Management {pull}15117[#15117]
-
-Upgrade API::
-* TemplateUpgraders should be called during rolling restart {pull}25263[#25263] (issues: {issue}24379[#24379], {issue}24680[#24680])
-
-
-
-[[enhancement-6.0.0-beta1]]
-[float]
-=== Enhancements
-
-Aggregations::
-* Add strict parsing of aggregation ranges {pull}25769[#25769]
-* Adds rewrite phase to aggregations {pull}25495[#25495] (issue: {issue}17676[#17676])
-* Tweak AggregatorBase.addRequestCircuitBreakerBytes {pull}25162[#25162] (issue: {issue}24511[#24511])
-* Add superset size to Significant Term REST response {pull}24865[#24865]
-* Add document count to Matrix Stats aggregation response {pull}24776[#24776]
-* Adds an implementation of LogLogBeta for the cardinality aggregation {pull}22323[#22323] (issue: {issue}22230[#22230])
-
-Allocation::
-* Adjust status on bad allocation explain requests {pull}25503[#25503] (issue: {issue}25458[#25458])
-* Promote replica on the highest version node {pull}25277[#25277] (issue: {issue}10708[#10708])
-
-Analysis::
-* [Analysis] Support normalizer in request param {pull}24767[#24767] (issue: {issue}23347[#23347])
-* Enforce validation for PathHierarchy tokenizer {pull}23510[#23510]
-* [analysis-icu] Allow setting unicodeSetFilter {pull}20814[#20814] (issue: {issue}20820[#20820])
-
-CAT API::
-* expand `/_cat/nodes` to return information about hard drive {pull}21775[#21775] (issue: {issue}21679[#21679])
-
-Cluster::
-* Validate a joining node's version with version of existing cluster nodes {pull}25808[#25808]
-* Switch indices read-only if a node runs out of disk space {pull}25541[#25541] (issue: {issue}24299[#24299])
-* Add a cluster block that allows to delete indices that are read-only {pull}24678[#24678]
-
-Core::
-* Add max file size bootstrap check {pull}25974[#25974]
-* Add compatibility versions to main action response {pull}25799[#25799]
-* Index ids in binary form. {pull}25352[#25352] (issues: {issue}18154[#18154], {issue}24615[#24615])
-* Explicitly reject duplicate data paths {pull}25178[#25178]
-* Use SPI in High Level Rest Client to load XContent parsers {pull}25097[#25097]
-* Upgrade to lucene-7.0.0-snapshot-a0aef2f {pull}24775[#24775]
-* Speed up PK lookups at index time. {pull}19856[#19856]
-
-Engine::
-* Add refresh stats tracking for realtime get {pull}25052[#25052] (issue: {issue}24806[#24806])
-* Introducing a translog deletion policy {pull}24950[#24950]
-
-Exceptions::
-* IllegalStateException: Only duplicated jar instead of classpath {pull}24953[#24953]
-
-Highlighting::
-* Picks offset source for the unified highlighter directly from the es mapping {pull}25747[#25747] (issue: {issue}25699[#25699])
-
-Index APIs::
-* Let primary own its replication group {pull}25692[#25692] (issue: {issue}25485[#25485])
-* Create index request should return the index name {pull}25139[#25139] (issue: {issue}23044[#23044])
-
-Ingest::
-* Add Ingest-Processor specific Rest Endpoints & Add Grok endpoint {pull}25059[#25059] (issue: {issue}24725[#24725])
-* Port support for commercial GeoIP2 databases from Logstash. {pull}24889[#24889]
-* add `exclude_keys` option to KeyValueProcessor {pull}24876[#24876] (issue: {issue}23856[#23856])
-* Allow removing multiple fields in ingest processor {pull}24750[#24750] (issue: {issue}24622[#24622])
-* Add target_field parameter to ingest processors {pull}24133[#24133] (issues: {issue}23228[#23228], {issue}23682[#23682])
-
-Inner Hits::
-* Reuse inner hit query weight {pull}24571[#24571] (issue: {issue}23917[#23917])
-
-Internal::
-* Cleanup IndexFieldData visibility {pull}25900[#25900]
-* Bump the min compat version to 5.6.0 {pull}25805[#25805]
-* "shard started" should show index and shard ID {pull}25157[#25157]
-* Break out clear scroll logic from TransportClearScrollAction {pull}25125[#25125] (issue: {issue}25094[#25094])
-* Add helper methods to TransportActionProxy to identify proxy actions and requests {pull}25124[#25124]
-* Add remote cluster infrastructure to fetch discovery nodes. {pull}25123[#25123] (issue: {issue}25094[#25094])
-* Add the ability to set eager_global_ordinals in the new parent-join field {pull}25019[#25019]
-* Disallow multiple parent-join fields per mapping {pull}25002[#25002]
-* Remove the need for _UNRELEASED suffix in versions {pull}24798[#24798] (issue: {issue}24768[#24768])
-* Optimize the order of bytes in uuids for better compression. {pull}24615[#24615] (issue: {issue}18209[#18209])
-
-Java API::
-* Always Accumulate Transport Exceptions {pull}25017[#25017] (issue: {issue}23099[#23099])
-
-Java High Level REST Client::
-* [DOCS] restructure java clients docs pages {pull}25517[#25517]
-* Use SPI in High Level Rest Client to load XContent parsers {pull}25098[#25098] (issues: {issue}25024[#25024], {issue}25097[#25097])
-* Add support for clear scroll to high level REST client {pull}25038[#25038]
-* Add search scroll method to high level REST client {pull}24938[#24938] (issue: {issue}23331[#23331])
-* Add search method to high level REST client {pull}24796[#24796] (issues: {issue}24794[#24794], {issue}24795[#24795])
-
-Java REST Client::
-* Shade external dependencies in the rest client jar {pull}25780[#25780] (issue: {issue}25208[#25208])
-* RestClient uses system properties and system default SSLContext {pull}25757[#25757] (issue: {issue}23231[#23231])
-
-Logging::
-* Prevent excessive disk consumption by log files {pull}25660[#25660]
-* Use LRU set to reduce repeat deprecation messages {pull}25474[#25474] (issue: {issue}25457[#25457])
-
-Mapping::
-* Better validation of `copy_to`. {pull}25983[#25983]
-* Optimize `terms` queries on `ip` addresses to use a `PointInSetQuery` whenever possible. {pull}25669[#25669] (issue: {issue}25667[#25667])
-
-Network::
-* Move TransportStats accounting into TcpTransport {pull}25251[#25251]
-* Simplify connection closing and cleanups in TcpTransport {pull}25250[#25250]
-* Disable the Netty recycler in the client {pull}24793[#24793] (issues: {issue}22452[#22452], {issue}24721[#24721])
-* Remove Netty logging hack {pull}24653[#24653] (issues: {issue}24469[#24469], {issue}5624[#5624], {issue}6568[#6568], {issue}6696[#6696])
-
-Packaging::
-* Remove memlock suggestion from systemd service {pull}25979[#25979]
-* Set address space limit in systemd service file {pull}25975[#25975]
-* Version option should display if snapshot {pull}25970[#25970]
-* Ignore JVM options before checking Java version {pull}25969[#25969]
-* Also skip JAVA_TOOL_OPTIONS on Windows {pull}25968[#25968]
-* Introduce elasticsearch-env for Windows {pull}25958[#25958]
-* Introduce elasticsearch-env {pull}25815[#25815] (issue: {issue}20286[#20286])
-* Stop exporting HOSTNAME from scripts {pull}25807[#25807]
-
-Parent/Child::
-* Remove ParentJoinFieldSubFetchPhase {pull}25550[#25550] (issue: {issue}25363[#25363])
-* Support parent id being specified as number in the _source {pull}25547[#25547]
-
-Plugin Lang Painless::
-* Allow Custom Whitelists in Painless {pull}25557[#25557]
-* Update Painless to Allow Augmentation from Any Class {pull}25360[#25360]
-* Add Needs Methods to Painless Script Context Factories {pull}25267[#25267]
-* Support Script Context Stateful Factory in Painless {pull}25233[#25233]
-* Generate Painless Factory for Creating Script Instances {pull}25120[#25120]
-* Update Painless to Use New Script Contexts {pull}25015[#25015]
-* Optimize instance creation in LambdaBootstrap {pull}24618[#24618]
-
-Plugin Repository GCS::
-* GCS Repository: Add secure storage of credentials {pull}24697[#24697]
-
-Plugin Repository S3::
-* S3 Repository: Add back repository level credentials {pull}24609[#24609]
-
-Plugins::
-* Move tribe to a module {pull}25778[#25778]
-* Plugins can register pre-configured char filters {pull}25000[#25000] (issue: {issue}23658[#23658])
-* Add purge option to remove plugin CLI {pull}24981[#24981]
-* Allow plugins to register pre-configured tokenizers {pull}24751[#24751] (issues: {issue}24223[#24223], {issue}24572[#24572])
-* Move ReindexAction class to core {pull}24684[#24684] (issue: {issue}24578[#24578])
-* Make PreConfiguredTokenFilter harder to misuse {pull}24572[#24572] (issue: {issue}23658[#23658])
-
-Query DSL::
-* Make slop optional when parsing `span_near` query {pull}25677[#25677] (issue: {issue}25642[#25642])
-* Require a field when a `seed` is provided to the `random_score` function. {pull}25594[#25594] (issue: {issue}25240[#25240])
-
-REST::
-* Refactor PathTrie and RestController to use a single trie for all methods {pull}25459[#25459] (issue: {issue}24437[#24437])
-* Make ObjectParser support string to boolean conversion {pull}24668[#24668] (issue: {issue}21802[#21802])
-
-Recovery::
-* Goodbye, Translog Views {pull}25962[#25962]
-* Disallow multiple concurrent recovery attempts for same target shard {pull}25428[#25428]
-* Live primary-replica resync (no rollback) {pull}24841[#24841] (issue: {issue}10708[#10708])
-
-Scripting::
-* Scripting: Rename SearchScript.needsScores to needs_score {pull}25235[#25235]
-* Scripting: Add optional context parameter to put stored script requests {pull}25014[#25014]
-* Add New Security Script Settings {pull}24637[#24637] (issue: {issue}24532[#24532])
-
-Search::
-* Rewrite search requests on the coordinating nodes {pull}25814[#25814] (issue: {issue}25791[#25791])
-* Ensure query resources are fetched asynchronously during rewrite {pull}25791[#25791]
-* Introduce a new Rewriteable interface to streamline rewriting {pull}25788[#25788]
-* Reduce the scope of `QueryRewriteContext` {pull}25787[#25787]
-* Reduce the overhead of timeouts and low-level search cancellation. {pull}25776[#25776]
-* Reduce profiling overhead. {pull}25772[#25772] (issue: {issue}24799[#24799])
-* Prevent `can_match` requests from sending to incompatible nodes {pull}25705[#25705] (issue: {issue}25704[#25704])
-* Add a shard filter search phase to pre-filter shards based on query rewriting {pull}25658[#25658]
-* Ensure we rewrite common queries to `match_none` if possible {pull}25650[#25650]
-* Limit the number of concurrent shard requests per search request {pull}25632[#25632]
-* Add cluster name validation to RemoteClusterConnection {pull}25568[#25568]
-* Speed up sorted scroll when the index sort matches the search sort {pull}25138[#25138] (issue: {issue}6720[#6720])
-* Leverage scorerSupplier when applicable. {pull}25109[#25109]
-* Add Cross Cluster Search support for scroll searches {pull}25094[#25094]
-* Track EWMA[1] of task execution time in search threadpool executor {pull}24989[#24989] (issue: {issue}24915[#24915])
-* Query range fields by doc values when they are expected to be more efficient than points {pull}24823[#24823] (issue: {issue}24314[#24314])
-* Search: Fairer balancing when routing searches by session ID {pull}24671[#24671] (issue: {issue}24642[#24642])
-
-Sequence IDs::
-* Move primary term from ReplicationRequest to ConcreteShardRequest {pull}25822[#25822]
-* Add reason to global checkpoint updates on replica {pull}25612[#25612] (issue: {issue}10708[#10708])
-* Introduce primary/replica mode for GlobalCheckPointTracker {pull}25468[#25468]
-* Throw back replica local checkpoint on new primary {pull}25452[#25452] (issues: {issue}10708[#10708], {issue}25355[#25355])
-* Update global checkpoint when increasing primary term on replica {pull}25422[#25422] (issues: {issue}10708[#10708], {issue}25355[#25355])
-* Enable a long translog retention policy by default {pull}25294[#25294] (issues: {issue}10708[#10708], {issue}25147[#25147])
-* Introduce primary context {pull}25122[#25122] (issues: {issue}10708[#10708], {issue}25355[#25355])
-* Block older operations on primary term transition {pull}24779[#24779] (issue: {issue}10708[#10708])
-
-Settings::
-* Add disk threshold settings validation {pull}25600[#25600] (issue: {issue}25560[#25560])
-* Enable cross-setting validation {pull}25560[#25560] (issue: {issue}25541[#25541])
-* Validate `transport.profiles.*` settings {pull}25508[#25508]
-* Cleanup network / transport related settings {pull}25489[#25489]
-* Emit settings deprecation logging at most once {pull}25457[#25457]
-* IndexMetaData: Introduce internal format index setting {pull}25292[#25292]
-
-Snapshot/Restore::
-* Improves snapshot logging and snapshot deletion error handling {pull}25264[#25264]
-
-Stats::
-* Update `IndexShard#refreshMetric` via a `ReferenceManager.RefreshListener` {pull}25083[#25083] (issues: {issue}24806[#24806], {issue}25052[#25052])
-
-Translog::
-* Translog file recovery should not rely on lucene commits {pull}25005[#25005] (issue: {issue}24950[#24950])
-
-
-
-[[bug-6.0.0-beta1]]
-[float]
-=== Bug fixes
-
-Aggregations::
-* Fixes array out of bounds for value count agg {pull}26038[#26038] (issue: {issue}17379[#17379])
-* Aggregations bug: Significant_text fails on arrays of text. {pull}25030[#25030] (issue: {issue}25029[#25029])
-
-Aliases::
-* mget with an alias shouldn't ignore alias routing {pull}25697[#25697] (issue: {issue}25696[#25696])
-* GET aliases should 404 if aliases are missing {pull}25043[#25043] (issue: {issue}24644[#24644])
-
-Analysis::
-* Pre-configured shingle filter should disable graph analysis {pull}25853[#25853] (issue: {issue}25555[#25555])
-
-Circuit Breakers::
-* Checks the circuit breaker before allocating bytes for a new big array {pull}25010[#25010] (issue: {issue}24790[#24790])
-
-Core::
-* Release operation permit on thread-pool rejection {pull}25930[#25930] (issue: {issue}25863[#25863])
-* Node should start up despite of a lingering `.es_temp_file` {pull}21210[#21210] (issue: {issue}21007[#21007])
-
-Discovery::
-* MasterNodeChangePredicate should use the node instance to detect master change {pull}25877[#25877] (issue: {issue}25471[#25471])
-
-Engine::
-* Engine - do not index operations with seq# lower than the local checkpoint into lucene {pull}25827[#25827] (issues: {issue}1[#1], {issue}2[#2], {issue}25592[#25592])
-
-Geo::
-* Fix typo in GeoUtils#isValidLongitude {pull}25121[#25121]
-
-Highlighting::
-* FastVectorHighlighter should not cache the field query globally {pull}25197[#25197] (issue: {issue}25171[#25171])
-* Higlighters: Fix MultiPhrasePrefixQuery rewriting {pull}25103[#25103] (issue: {issue}25088[#25088])
-
-Index APIs::
-* Shrink API should ignore templates {pull}25380[#25380] (issue: {issue}25035[#25035])
-* Rollover max docs should only count primaries {pull}24977[#24977] (issue: {issue}24217[#24217])
-
-Ingest::
-* Sort Processor does not have proper behavior with targetField {pull}25237[#25237] (issue: {issue}24133[#24133])
-* fix grok's pattern parsing to validate pattern names in expression {pull}25063[#25063] (issue: {issue}22831[#22831])
-
-Inner Hits::
-* When fetching nested inner hits only access stored fields when needed {pull}25864[#25864] (issue: {issue}6[#6])
-
-Internal::
-* Fix BytesReferenceStreamInput#skip with offset {pull}25634[#25634]
-* Fix race condition in RemoteClusterConnection node supplier {pull}25432[#25432]
-* Initialise empty lists in BaseTaskResponse constructor {pull}25290[#25290]
-* Extract a common base class for scroll executions {pull}24979[#24979] (issue: {issue}16555[#16555])
-* Obey lock order if working with store to get metadata snapshots {pull}24787[#24787] (issue: {issue}24481[#24481])
-* Fix Version based BWC and set correct minCompatVersion {pull}24732[#24732]
-* Fix `_field_caps` serialization in order to support cross cluster search {pull}24722[#24722]
-* Avoid race when shutting down controller processes {pull}24579[#24579]
-
-Mapping::
-* Fix parsing of ip range queries. {pull}25768[#25768] (issue: {issue}25636[#25636])
-* Disable date field mapping changing {pull}25285[#25285] (issue: {issue}25271[#25271])
-* Correctly enable _all for older 5.x indices {pull}25087[#25087] (issue: {issue}25068[#25068])
-* token_count datatype should handle null value {pull}25046[#25046] (issue: {issue}24928[#24928])
-* keep _parent field while updating child type mapping {pull}24407[#24407] (issue: {issue}23381[#23381])
-
-More Like This::
-* Pass over _routing value with more_like_this items to be retrieved {pull}24679[#24679] (issue: {issue}23699[#23699])
-
-Nested Docs::
-* In case of a single type the _id field should be added to the nested document instead of _uid field {pull}25149[#25149]
-
-Network::
-* Ensure pending transport handlers are invoked for all channel failures {pull}25150[#25150]
-* Notify onConnectionClosed rather than onNodeDisconnect to prune transport handlers {pull}24639[#24639] (issues: {issue}24557[#24557], {issue}24575[#24575], {issue}24632[#24632])
-
-Packaging::
-* Exit Windows scripts promptly on failure {pull}25959[#25959]
-* Pass config path as a system property {pull}25943[#25943]
-* ES_HOME needs to be made absolute before attempt at traversal {pull}25865[#25865]
-* Fix elasticsearch-keystore handling of path.conf {pull}25811[#25811]
-* Stop disabling explicit GC {pull}25759[#25759]
-* Avoid failing install if system-sysctl is masked {pull}25657[#25657] (issue: {issue}24234[#24234])
-* Get short path name for native controllers {pull}25344[#25344]
-* When stopping via systemd only kill the JVM, not its control group {pull}25195[#25195]
-* remove remaining references to scripts directory {pull}24771[#24771]
-* Handle parentheses in batch file path {pull}24731[#24731] (issue: {issue}24712[#24712])
-
-Parent/Child::
-* The default _parent field should not try to load global ordinals {pull}25851[#25851] (issue: {issue}25849[#25849])
-
-Percolator::
-* Fix range queries with date range based on current time in percolator queries. {pull}24666[#24666] (issue: {issue}23921[#23921])
-
-Plugin Lang Painless::
-* Painless: allow doubles to be casted to longs. {pull}25936[#25936]
-
-Plugin Repository Azure::
-* Make calls to CloudBlobContainer#exists privileged {pull}25937[#25937] (issue: {issue}25931[#25931])
-
-Plugin Repository GCS::
-* Ensure that gcs client creation is privileged {pull}25938[#25938] (issue: {issue}25932[#25932])
-
-Plugin Repository HDFS::
-* Upgrading HDFS Repository Plugin to use HDFS 2.8.1 Client {pull}25497[#25497] (issue: {issue}25450[#25450])
-
-Plugin Repository S3::
-* Avoid SecurityException in repository-S3 on DefaultS3OutputStream.flush() {pull}25254[#25254] (issue: {issue}25192[#25192])
-
-Plugins::
-* X-Pack plugin download fails on Windows desktop {pull}24570[#24570]
-
-Query DSL::
-* SpanNearQueryBuilder should return the inner clause when a single clause is provided {pull}25856[#25856] (issue: {issue}25630[#25630])
-* Refactor field expansion for match, multi_match and query_string query {pull}25726[#25726] (issues: {issue}25551[#25551], {issue}25556[#25556])
-* WrapperQueryBuilder should also rewrite the parsed query {pull}25480[#25480]
-
-REST::
-* Fix handling of invalid error trace parameter {pull}25785[#25785] (issue: {issue}25774[#25774])
-* Fix handling of exceptions thrown on HEAD requests {pull}25172[#25172] (issue: {issue}21125[#21125])
-* Fixed NPEs caused by requests without content. {pull}23497[#23497] (issue: {issue}24701[#24701])
-* Fix get mappings HEAD requests {pull}23192[#23192] (issue: {issue}21125[#21125])
-
-Recovery::
-* Close translog view after primary-replica resync {pull}25862[#25862] (issue: {issue}24841[#24841])
-
-Reindex API::
-* Reindex: don't duplicate _source parameter {pull}24629[#24629] (issue: {issue}24628[#24628])
-* Add qa module that tests reindex-from-remote against pre-5.0 versions of Elasticsearch {pull}24561[#24561] (issues: {issue}23828[#23828], {issue}24520[#24520])
-
-Search::
-* Caching a MinDocQuery can lead to wrong results. {pull}25909[#25909]
-* Fix random score generation when no seed is provided. {pull}25908[#25908]
-* Merge FunctionScoreQuery and FiltersFunctionScoreQuery {pull}25889[#25889] (issues: {issue}15709[#15709], {issue}23628[#23628])
-* Respect cluster alias in `_index` aggs and queries {pull}25885[#25885] (issue: {issue}25606[#25606])
-* First increment shard stats before notifying and potentially sending response {pull}25818[#25818]
-* Remove assertion about deviation when casting to a float. {pull}25806[#25806] (issue: {issue}25330[#25330])
-* Prevent skipping shards if a suggest builder is present {pull}25739[#25739] (issue: {issue}25658[#25658])
-* Ensure remote cluster alias is preserved in inner hits aggs {pull}25627[#25627] (issue: {issue}25606[#25606])
-* Do not search locally if remote index pattern resolves to no indices {pull}25436[#25436] (issue: {issue}25426[#25426])
-* Adds check for negative search request size {pull}25397[#25397] (issue: {issue}22530[#22530])
-* Make sure range queries are correctly profiled. {pull}25108[#25108]
-* Fix RangeFieldMapper rangeQuery to properly handle relations {pull}24808[#24808] (issue: {issue}24744[#24744])
-* Fix ExpandSearchPhase when response contains no hits {pull}24688[#24688] (issue: {issue}24672[#24672])
-
-Sequence IDs::
-* Fix pre-6.0 response to unknown replication actions {pull}25744[#25744] (issue: {issue}10708[#10708])
-* Track local checkpoint on primary immediately {pull}25434[#25434] (issues: {issue}10708[#10708], {issue}25355[#25355], {issue}25415[#25415])
-* Initialize max unsafe auto ID timestamp on shrink {pull}25356[#25356] (issues: {issue}10708[#10708], {issue}25355[#25355])
-* Use correct primary term for replicating NOOPs {pull}25128[#25128]
-* Handle already closed while filling gaps {pull}25021[#25021] (issue: {issue}24925[#24925])
-
-Settings::
-* Fix settings serialization to not serialize secure settings or not take the total size into account {pull}25323[#25323]
-* Keystore CLI should use the AddFileKeyStoreCommand for files {pull}25298[#25298]
-* Allow resetting settings that use an IP validator {pull}24713[#24713] (issue: {issue}24709[#24709])
-
-Snapshot/Restore::
-* Snapshot/Restore: Ensure that shard failure reasons are correctly stored in CS {pull}25941[#25941] (issue: {issue}25878[#25878])
-* Output all empty snapshot info fields if in verbose mode {pull}25455[#25455] (issue: {issue}24477[#24477])
-* Remove redundant and broken MD5 checksum from repository-s3 {pull}25270[#25270] (issue: {issue}25269[#25269])
-* Consolidates the logic for cleaning up snapshots on master election {pull}24894[#24894] (issue: {issue}24605[#24605])
-* Removes completed snapshot from cluster state on master change {pull}24605[#24605] (issue: {issue}24452[#24452])
-
-Stats::
-* _nodes/stats should not fail due to concurrent AlreadyClosedException {pull}25016[#25016] (issue: {issue}23099[#23099])
-
-Suggesters::
-* Context suggester should filter doc values field {pull}25858[#25858] (issue: {issue}25404[#25404])
-
-
-
-[[regression-6.0.0-beta1]]
-[float]
-=== Regressions
-
-Highlighting::
-* Fix Fast Vector Highlighter NPE on match phrase prefix {pull}25116[#25116] (issue: {issue}25088[#25088])
-
-Search::
-* Always use DisjunctionMaxQuery to build cross fields disjunction {pull}25115[#25115] (issue: {issue}23966[#23966])
-
-
-
-[[upgrade-6.0.0-beta1]]
-[float]
-=== Upgrades
-
-Network::
-* Upgrade to Netty 4.1.13.Final {pull}25581[#25581] (issues: {issue}24729[#24729], {issue}6866[#6866])
-* Upgrade to Netty 4.1.11.Final {pull}24652[#24652]
-
-Upgrade API::
-* Improve stability and logging of TemplateUpgradeServiceIT tests {pull}25386[#25386] (issue: {issue}25382[#25382])
diff --git a/docs/reference/release-notes/6.0.0-beta2.asciidoc b/docs/reference/release-notes/6.0.0-beta2.asciidoc
deleted file mode 100644
index 55264b8f0e2e7..0000000000000
--- a/docs/reference/release-notes/6.0.0-beta2.asciidoc
+++ /dev/null
@@ -1,129 +0,0 @@
-[[release-notes-6.0.0-beta2]]
-== 6.0.0-beta2 Release Notes
-
-Also see <>.
-
-[[breaking-6.0.0-beta2]]
-[float]
-=== Breaking changes
-
-Analysis::
-* Do not allow custom analyzers to have the same names as built-in analyzers {pull}22349[#22349] (issue: {issue}22263[#22263])
-
-Cluster::
-* Disallow : in cluster and index/alias names {pull}26247[#26247] (issue: {issue}23892[#23892])
-
-Inner Hits::
-* Unfiltered nested source should keep its full path {pull}26102[#26102] (issues: {issue}18567[#18567], {issue}23090[#23090])
-
-Mapping::
-* Reject out of range numbers for float, double and half_float {pull}25826[#25826] (issue: {issue}25534[#25534])
-
-Network::
-* Remove unused Netty-related settings {pull}26161[#26161]
-
-Packaging::
-* Rename CONF_DIR to ES_PATH_CONF {pull}26197[#26197] (issue: {issue}26154[#26154])
-
-Query DSL::
-* Throw exception in scroll requests using `from` {pull}26235[#26235] (issue: {issue}9373[#9373])
-
-
-
-[[breaking-java-6.0.0-beta2]]
-[float]
-=== Breaking Java changes
-
-Aggregations::
-* Fix NPE when `values` is omitted on percentile_ranks agg {pull}26046[#26046]
-
-
-
-[[enhancement-6.0.0-beta2]]
-[float]
-=== Enhancements
-
-Aggregations::
-* Support distance units in GeoHashGrid aggregation precision {pull}26291[#26291] (issue: {issue}5042[#5042])
-* Reject multiple methods in `percentiles` aggregation {pull}26163[#26163] (issue: {issue}26095[#26095])
-* Use `global_ordinals_hash` execution mode when sorting by sub aggregations. {pull}26014[#26014] (issue: {issue}24359[#24359])
-* Add a specialized deferring collector for terms aggregator {pull}25190[#25190]
-
-Core::
-* Use Java 9 FilePermission model {pull}26302[#26302] (issue: {issue}21534[#21534])
-* Add friendlier message on bad keystore permissions {pull}26284[#26284]
-* Epoch millis and second formats accept float implicitly {pull}26119[#26119] (issue: {issue}14641[#14641])
-
-Internal::
-* Prevent cluster internal `ClusterState.Custom` impls to leak to a client {pull}26232[#26232]
-* Use holder pattern for lazy deprecation loggers {pull}26218[#26218] (issue: {issue}26210[#26210])
-* Allow `ClusterState.Custom` to be created on initial cluster states {pull}26144[#26144]
-
-Java High Level REST Client::
-* Make RestHighLevelClient Closeable and simplify its creation {pull}26180[#26180] (issue: {issue}26086[#26086])
-
-Mapping::
-* Loosen the restrictions on disabling _all in 6.x {pull}26259[#26259]
-
-Percolator::
-* Store the QueryBuilder's Writable representation instead of its XContent representation {pull}25456[#25456]
-* Add support for selecting percolator query candidate matches containing wildcard / prefix queries {pull}25351[#25351]
-
-Settings::
-* Persist created keystore on startup unless keystore is present {pull}26253[#26253] (issue: {issue}26126[#26126])
-* Settings: Add keystore.seed auto generated secure setting {pull}26149[#26149]
-* Settings: Add keystore creation to add commands {pull}26126[#26126]
-
-
-
-[[bug-6.0.0-beta2]]
-[float]
-=== Bug fixes
-
-Aggregations::
-* Check bucket metric ages point to a multi bucket agg {pull}26215[#26215] (issue: {issue}25775[#25775])
-
-Allocation::
-* Fix DiskThresholdMonitor flood warning {pull}26204[#26204] (issue: {issue}26201[#26201])
-* Allow wildcards for shard IP filtering {pull}26187[#26187] (issues: {issue}22591[#22591], {issue}26184[#26184])
-
-CRUD::
-* Serialize and expose timeout of acknowledged requests in REST layer {pull}26189[#26189] (issue: {issue}26213[#26213])
-* Fix silent loss of last command to _bulk and _msearch due to missing newline {pull}25740[#25740] (issue: {issue}7601[#7601])
-
-Cluster::
-* Register setting `cluster.indices.tombstones.size` {pull}26193[#26193] (issue: {issue}26191[#26191])
-
-Highlighting::
-* Fix nested query highlighting {pull}26305[#26305] (issue: {issue}26230[#26230])
-
-Logging::
-* Allow not configure logging without config {pull}26209[#26209] (issues: {issue}20575[#20575], {issue}24076[#24076])
-
-Mapping::
-* ICUCollationKeywordFieldMapper use SortedSetDocValuesField {pull}26267[#26267]
-* Fix serialization of the `_all` field. {pull}26143[#26143] (issue: {issue}26136[#26136])
-
-Network::
-* Release pipelined http responses on close {pull}26226[#26226]
-
-Packaging::
-* Detect modified keystore on package removal {pull}26300[#26300]
-* Create keystore on RPM and Debian package install {pull}26282[#26282]
-* Add safer empty variable checking for Windows {pull}26268[#26268] (issue: {issue}26261[#26261])
-* Export HOSTNAME environment variable {pull}26262[#26262] (issues: {issue}25807[#25807], {issue}26255[#26255])
-* Fix daemonization command status test {pull}26196[#26196] (issue: {issue}26080[#26080])
-* Set RuntimeDirectory in systemd service {pull}23526[#23526]
-
-Search::
-* Refactor simple_query_string to handle text part like multi_match and query_string {pull}26145[#26145] (issue: {issue}25726[#25726])
-* Fix `_exists_` in query_string on empty indices. {pull}25993[#25993] (issue: {issue}25956[#25956])
-
-
-
-[[upgrade-6.0.0-beta2]]
-[float]
-=== Upgrades
-
-Core::
-* Upgrade to lucene-7.0.0-snapshot-a128fcb. {pull}26090[#26090]
diff --git a/docs/reference/release-notes/6.0.0-rc1.asciidoc b/docs/reference/release-notes/6.0.0-rc1.asciidoc
deleted file mode 100644
index d57fe4b12050b..0000000000000
--- a/docs/reference/release-notes/6.0.0-rc1.asciidoc
+++ /dev/null
@@ -1,141 +0,0 @@
-[[release-notes-6.0.0-rc1]]
-== 6.0.0-rc1 Release Notes
-
-Also see <>.
-
-[[breaking-6.0.0-rc1]]
-[float]
-=== Breaking changes
-
-Packaging::
-* Configure heap dump path out of the box {pull}26755[#26755] (issue: {issue}26665[#26665])
-
-Query DSL::
-* Remove deprecated `type` and `slop` field in `match` query {pull}26720[#26720]
-* Remove several parse field deprecations in query builders {pull}26711[#26711]
-* Remove deprecated parameters from `ids_query` {pull}26508[#26508]
-
-
-
-[[deprecation-6.0.0-rc1]]
-[float]
-=== Deprecations
-
-Plugins::
-* Plugins: Add backcompat for sha1 checksums {pull}26748[#26748] (issue: {issue}26746[#26746])
-
-
-
-[[enhancement-6.0.0-rc1]]
-[float]
-=== Enhancements
-
-Core::
-* Allow `InputStreamStreamInput` array size validation where applicable {pull}26692[#26692]
-* Refactor bootstrap check results and error messages {pull}26637[#26637]
-* Add BootstrapContext to expose settings and recovered state to bootstrap checks {pull}26628[#26628]
-* Unit testable index creation task on MetaDataCreateIndexService {pull}25961[#25961]
-
-Discovery::
-* Allow plugins to validate cluster-state on join {pull}26595[#26595]
-
-Mapping::
-* More efficient encoding of range fields. {pull}26470[#26470] (issue: {issue}26443[#26443])
-
-Plugin Repository HDFS::
-* Add permission checks before reading from HDFS stream {pull}26716[#26716] (issue: {issue}26714[#26714])
-
-Recovery::
-* Introduce a History UUID as a requirement for ops based recovery {pull}26577[#26577] (issue: {issue}10708[#10708])
-
-Scripting::
-* ScriptService: Replace max compilation per minute setting with max compilation rate {pull}26399[#26399]
-
-Search::
-* Add soft limit on allowed number of script fields in request {pull}26598[#26598] (issue: {issue}26390[#26390])
-* Add a soft limit for the number of requested doc-value fields {pull}26574[#26574] (issue: {issue}26390[#26390])
-
-Sequence IDs::
-* Restoring from snapshot should force generation of a new history uuid {pull}26694[#26694] (issues: {issue}10708[#10708], {issue}26544[#26544], {issue}26557[#26557], {issue}26577[#26577])
-* Add global checkpoint tracking on the primary {pull}26666[#26666] (issue: {issue}26591[#26591])
-* Introduce global checkpoint background sync {pull}26591[#26591] (issues: {issue}26573[#26573], {issue}26630[#26630], {issue}26666[#26666])
-* Move `UNASSIGNED_SEQ_NO` and `NO_OPS_PERFORMED` to SequenceNumbers` {pull}26494[#26494] (issue: {issue}10708[#10708])
-
-
-
-[[bug-6.0.0-rc1]]
-[float]
-=== Bug fixes
-
-Aggregations::
-* Do not delegate a null scorer to LeafBucketCollectors {pull}26747[#26747] (issue: {issue}26611[#26611])
-
-Core::
-* Fix cache compute if absent for expired entries {pull}26516[#26516]
-
-Dates::
-* Fix typo in date format {pull}26503[#26503] (issue: {issue}26500[#26500])
-
-Highlighting::
-* Fix percolator highlight sub fetch phase to not highlight query twice {pull}26622[#26622]
-
-Inner Hits::
-* Do not allow inner hits that fetch _source and have a non nested object field as parent {pull}25749[#25749] (issue: {issue}25315[#25315])
-
-Internal::
-* `IndexShard.routingEntry` should only be updated once all internal state is ready {pull}26776[#26776]
-* Catch exceptions and inform handler in RemoteClusterConnection#collectNodes {pull}26725[#26725] (issue: {issue}26700[#26700])
-* Internal: Add versionless alias for rest client codebase in policy files {pull}26521[#26521]
-
-Java API::
-* BulkProcessor flush runnable preserves the thread context from creation time {pull}26718[#26718] (issue: {issue}26596[#26596])
-
-Java High Level REST Client::
-* Make RestHighLevelClient's Request class public {pull}26627[#26627] (issue: {issue}26455[#26455])
-* Forbid direct usage of ContentType.create() methods {pull}26457[#26457] (issues: {issue}22769[#22769], {issue}26438[#26438])
-
-Java REST Client::
-* Better message text for ResponseException {pull}26564[#26564]
-
-Mapping::
-* Allow copying from a field to another field that belongs to the same nested object. {pull}26774[#26774] (issue: {issue}26763[#26763])
-
-Plugin Analysis Kuromoji::
-* Fix kuromoji default stoptags {pull}26600[#26600] (issue: {issue}26519[#26519])
-
-Plugin Discovery File::
-* Fix discovery-file plugin to use custom config path {pull}26662[#26662] (issue: {issue}26660[#26660])
-
-Plugin Repository Azure::
-* Azure snapshots can not be restored anymore {pull}26778[#26778] (issues: {issue}22858[#22858], {issue}26751[#26751], {issue}26777[#26777])
-* Snapshot : azure module - accelerate the listing of files (used in delete snapshot) {pull}25710[#25710] (issue: {issue}25424[#25424])
-
-Plugin Repository HDFS::
-* Add Log4j to SLF4J binding for repository-hdfs {pull}26514[#26514] (issue: {issue}26512[#26512])
-
-Query DSL::
-* Fixed incomplete JSON body on count request making org.elasticsearch.rest.action.RestActions#parseTopLevelQueryBuilder go into endless loop {pull}26680[#26680] (issue: {issue}26083[#26083])
-
-Search::
-* Fail query when a sort is provided in conjunction with rescorers {pull}26510[#26510]
-* Let search phases override max concurrent requests {pull}26484[#26484] (issue: {issue}26198[#26198])
-
-Similarities::
-* Add boolean similarity to built in similarity types {pull}26613[#26613]
-
-Upgrade API::
-* Upgrade API: fix excessive logging and unnecessary template updates {pull}26698[#26698] (issue: {issue}26673[#26673])
-
-
-
-[[upgrade-6.0.0-rc1]]
-[float]
-=== Upgrades
-
-Core::
-* Upgrade to Lucene 7.0.0 {pull}26744[#26744]
-* Upgrade to lucene-7.0.0-snapshot-d94a5f0. {pull}26441[#26441]
-
-Logging::
-* Upgrade to Log4j 2.9.1 {pull}26750[#26750] (issues: {issue}109[#109], {issue}26464[#26464], {issue}26467[#26467])
-* Upgrade to Log4j 2.9.0 {pull}26450[#26450] (issue: {issue}23798[#23798])
diff --git a/docs/reference/release-notes/6.0.0-rc2.asciidoc b/docs/reference/release-notes/6.0.0-rc2.asciidoc
deleted file mode 100644
index e1e296b7436f5..0000000000000
--- a/docs/reference/release-notes/6.0.0-rc2.asciidoc
+++ /dev/null
@@ -1,118 +0,0 @@
-[[release-notes-6.0.0-rc2]]
-== 6.0.0-rc2 Release Notes
-
-Also see <>.
-
-[[breaking-6.0.0-rc2]]
-[float]
-=== Breaking changes
-
-Inner Hits::
-* Return the _source of inner hit nested as is without wrapping it into its full path context {pull}26982[#26982] (issues: {issue}26102[#26102], {issue}26944[#26944])
-
-
-
-[[enhancement-6.0.0-rc2]]
-[float]
-=== Enhancements
-
-Core::
-* Ignore .DS_Store files on macOS {pull}27108[#27108] (issue: {issue}23982[#23982])
-
-Index Templates::
-* Fix error message for a put index template request without index_patterns {pull}27102[#27102] (issue: {issue}27100[#27100])
-
-Mapping::
-* Don't detect source's XContentType in DocumentParser.parseDocument() {pull}26880[#26880]
-
-Network::
-* Add additional low-level logging handler {pull}26887[#26887]
-* Unwrap causes when maybe dying {pull}26884[#26884]
-
-Plugins::
-* Adjust SHA-512 supported format on plugin install {pull}27093[#27093]
-
-REST::
-* Cat shards bytes {pull}26952[#26952]
-
-
-
-[[bug-6.0.0-rc2]]
-[float]
-=== Bug fixes
-
-Aggregations::
-* Create weights lazily in filter and filters aggregation {pull}26983[#26983]
-* Fix IndexOutOfBoundsException in histograms for NaN doubles (#26787) {pull}26856[#26856] (issue: {issue}26787[#26787])
-* Scripted_metric _agg parameter disappears if params are provided {pull}19863[#19863] (issue: {issue}19768[#19768])
-
-CAT API::
-* Fix NPE for /_cat/indices when no primary shard {pull}26953[#26953] (issue: {issue}26942[#26942])
-
-Cache::
-* Reduce the default number of cached queries. {pull}26949[#26949] (issue: {issue}26938[#26938])
-
-Core::
-* Timed runnable should delegate to abstract runnable {pull}27095[#27095] (issue: {issue}27069[#27069])
-* Stop invoking non-existent syscall {pull}27016[#27016] (issue: {issue}20179[#20179])
-* MetaData Builder doesn't properly prevent an alias with the same name as an index {pull}26804[#26804]
-
-Ingest::
-* date processor should not fail if timestamp is specified as json number {pull}26986[#26986] (issue: {issue}26967[#26967])
-* date_index_name processor should not fail if timestamp is specified as json number {pull}26910[#26910] (issue: {issue}26890[#26890])
-
-Internal::
-* Upgrade Lucene to version 7.0.1 {pull}26926[#26926]
-
-Java High Level REST Client::
-* Make ShardSearchTarget optional when parsing ShardSearchFailure {pull}27078[#27078] (issue: {issue}27055[#27055])
-
-Java REST Client::
-* rest-client-sniffer: configurable threadfactory {pull}26897[#26897]
-
-Mapping::
-* wrong link target for datatype murmur3 {pull}27143[#27143]
-
-Network::
-* Check for closed connection while opening {pull}26932[#26932]
-
-Packaging::
-* Fix handling of Windows paths containing parentheses {pull}26916[#26916] (issue: {issue}26454[#26454])
-
-Percolator::
-* Also support query extraction for queries wrapped inside a ESToParentBlockJoinQuery {pull}26754[#26754]
-
-Plugin Analysis Phonetic::
-* Fix beidermorse phonetic token filter for unspecified `languageset` {pull}27112[#27112] (issue: {issue}26771[#26771])
-
-Plugin Repository Azure::
-* Use Azure upload method instead of our own implementation {pull}26751[#26751]
-
-REST::
-* Fix inconsistencies in the rest api specs for cat.snapshots {pull}26996[#26996] (issues: {issue}25737[#25737], {issue}26923[#26923])
-* Fix inconsistencies in the rest api specs for *_script {pull}26971[#26971] (issue: {issue}26923[#26923])
-* exists template needs a template name {pull}25988[#25988]
-
-Reindex API::
-* Fix update_by_query's default size parameter {pull}26784[#26784] (issue: {issue}26761[#26761])
-
-Search::
-* Avoid stack overflow on search phases {pull}27069[#27069] (issue: {issue}27042[#27042])
-* Fix search_after with geo distance sorting {pull}26891[#26891]
-* Fix serialization errors when cross cluster search goes to a single shard {pull}26881[#26881] (issue: {issue}26833[#26833])
-* Early termination with index sorting should not set terminated_early in the response {pull}26597[#26597] (issue: {issue}26408[#26408])
-* Format doc values fields. {pull}22146[#22146]
-
-Sequence IDs::
-* Fire global checkpoint sync under system context {pull}26984[#26984]
-
-Settings::
-* Emit settings deprecation logging on empty update {pull}27017[#27017] (issue: {issue}26419[#26419])
-* Fix filtering for ListSetting {pull}26914[#26914]
-
-Stats::
-* Keep cumulative elapsed scroll time in microseconds {pull}27068[#27068] (issue: {issue}27046[#27046])
-
-Suggesters::
-* Fix division by zero in phrase suggester that causes assertion to fail {pull}27149[#27149]
-
diff --git a/docs/reference/release-notes/6.0.0.asciidoc b/docs/reference/release-notes/6.0.0.asciidoc
deleted file mode 100644
index 6fed1e1f38716..0000000000000
--- a/docs/reference/release-notes/6.0.0.asciidoc
+++ /dev/null
@@ -1,1065 +0,0 @@
-[[release-notes-6.0.0]]
-== 6.0.0 Release Notes
-
-These release notes include all changes made in the alpha, beta, and RC releases of 6.0.0,
-excluding those that have already been released in the 5.x series before 6.0.0-alpha1.
-
-Also see:
-
-* <>
-* <>
-
-[[breaking-6.0.0]]
-[float]
-=== Breaking changes
-
-Aggregations::
-* Change parsing of numeric `to` and `from` parameters in `date_range` aggregation {pull}25376[#25376] (issue: {issue}17920[#17920])
-
-Aliases::
-* Wrong behavior deleting alias {pull}23997[#23997] (issues: {issue}10106[#10106], {issue}23960[#23960])
-
-Allocation::
-* Remove `cluster.routing.allocation.snapshot.relocation_enabled` setting {pull}20994[#20994]
-
-Analysis::
-* Do not allow custom analyzers to have the same names as built-in analyzers {pull}22349[#22349] (issue: {issue}22263[#22263])
-* Removing query-string parameters in `_analyze` API {pull}20704[#20704] (issue: {issue}20246[#20246])
-
-CAT API::
-* Write -1 on unbounded queue in cat thread pool {pull}21342[#21342] (issue: {issue}21187[#21187])
-
-CRUD::
-* Disallow `VersionType.FORCE` for GetRequest {pull}21079[#21079] (issue: {issue}20995[#20995])
-* Disallow `VersionType.FORCE` versioning for 6.x indices {pull}20995[#20995] (issue: {issue}20377[#20377])
-* If the index does not exist, delete document will not auto create it {pull}24518[#24518] (issue: {issue}15425[#15425])
-
-Cluster::
-* Disallow : in cluster and index/alias names {pull}26247[#26247] (issue: {issue}23892[#23892])
-* No longer allow cluster name in data path {pull}20433[#20433] (issue: {issue}20391[#20391])
-
-Core::
-* Simplify file store {pull}24402[#24402] (issue: {issue}24390[#24390])
-* Make boolean conversion strict {pull}22200[#22200]
-* Remove the `default` store type. {pull}21616[#21616]
-* Remove store throttling. {pull}21573[#21573]
-
-Geo::
-* Remove deprecated geo search features {pull}22876[#22876]
-* Reduce GeoDistance Insanity {pull}19846[#19846]
-
-Highlighting::
-* Remove the postings highlighter and make unified the default highlighter choice {pull}25028[#25028]
-
-Index APIs::
-* Remove (deprecated) support for '+' in index expressions {pull}25274[#25274] (issue: {issue}24515[#24515])
-* Delete index API to work only against concrete indices {pull}25268[#25268] (issues: {issue}2318[#2318], {issue}23997[#23997])
-* Open/Close index api to allow_no_indices by default {pull}24401[#24401] (issues: {issue}24031[#24031], {issue}24341[#24341])
-* Remove support for controversial `ignore_unavailable` and `allow_no_indices` from indices exists api {pull}20712[#20712]
-
-Index Templates::
-* Allows multiple patterns to be specified for index templates {pull}21009[#21009] (issue: {issue}20690[#20690])
-
-Indexed Scripts/Templates::
-* Scripting: Remove search template actions {pull}25717[#25717]
-
-Ingest::
-* update ingest-user-agent regexes.yml {pull}25608[#25608]
-* remove ingest.new_date_format {pull}25583[#25583]
-
-Inner Hits::
-* Return the _source of inner hit nested as is without wrapping it into its full path context {pull}26982[#26982] (issues: {issue}26102[#26102], {issue}26944[#26944])
-
-Java API::
-* Enforce Content-Type requirement on the rest layer and remove deprecated methods {pull}23146[#23146] (issue: {issue}19388[#19388])
-
-Java REST Client::
-* Remove deprecated created and found from index, delete and bulk {pull}25516[#25516] (issues: {issue}19566[#19566], {issue}19630[#19630], {issue}19633[#19633])
-
-Mapping::
-* Reject out of range numbers for float, double and half_float {pull}25826[#25826] (issue: {issue}25534[#25534])
-* Enforce at most one type. {pull}24428[#24428] (issue: {issue}24317[#24317])
-* Disallow `include_in_all` for 6.0+ indices {pull}22970[#22970] (issue: {issue}22923[#22923])
-* Disable _all by default, disallow configuring _all on 6.0+ indices {pull}22144[#22144] (issues: {issue}19784[#19784], {issue}20925[#20925], {issue}21341[#21341])
-* Throw an exception on unrecognized "match_mapping_type" {pull}22090[#22090] (issue: {issue}17285[#17285])
-
-Network::
-* Remove unused Netty-related settings {pull}26161[#26161]
-* Remove blocking TCP clients and servers {pull}22639[#22639]
-* Remove `modules/transport_netty_3` in favor of `netty_4` {pull}21590[#21590]
-* Remove LocalTransport in favor of MockTcpTransport {pull}20695[#20695]
-
-Packaging::
-* Configure heap dump path out of the box {pull}26755[#26755] (issue: {issue}26665[#26665])
-* Remove support for ES_INCLUDE {pull}25804[#25804]
-* Setup: Change default heap to 1G {pull}25695[#25695]
-* Use config directory to find jvm.options {pull}25679[#25679] (issue: {issue}23004[#23004])
-* Remove implicit 32-bit support {pull}25435[#25435]
-* Remove default path settings {pull}25408[#25408] (issue: {issue}25357[#25357])
-* Remove path.conf setting {pull}25392[#25392] (issue: {issue}25357[#25357])
-* Honor masking of systemd-sysctl.service {pull}24234[#24234] (issues: {issue}21899[#21899], {issue}806[#806])
-* Rename CONF_DIR to ES_PATH_CONF {pull}26197[#26197] (issue: {issue}26154[#26154])
-* Remove customization of ES_USER and ES_GROUP {pull}23989[#23989] (issue: {issue}23848[#23848])
-
-Percolator::
-* Remove deprecated percolate and mpercolate apis {pull}22331[#22331]
-
-Plugin Analysis ICU::
-* Upgrade icu4j for the ICU analysis plugin to 59.1 {pull}25243[#25243] (issue: {issue}21425[#21425])
-* Upgrade icu4j to latest version {pull}24821[#24821]
-
-Plugin Delete By Query::
-* Require explicit query in _delete_by_query API {pull}23632[#23632] (issue: {issue}23629[#23629])
-
-Plugin Discovery Azure Classic::
-* Remove `discovery.type` BWC layer from the EC2/Azure/GCE plugins {pull}25080[#25080] (issue: {issue}24543[#24543])
-
-Plugin Discovery EC2::
-* Ec2 Discovery: Cleanup deprecated settings {pull}24150[#24150]
-* Discovery EC2: Remove region setting {pull}23991[#23991] (issue: {issue}22758[#22758])
-* AWS Plugins: Remove signer type setting {pull}23984[#23984] (issue: {issue}22599[#22599])
-
-Plugin Lang JS::
-* Remove lang-python and lang-javascript {pull}20734[#20734] (issue: {issue}20698[#20698])
-
-Plugin Mapper Attachment::
-* Remove mapper attachments plugin {pull}20416[#20416] (issue: {issue}18837[#18837])
-
-Plugin Repository Azure::
-* Remove global `repositories.azure` settings {pull}23262[#23262] (issues: {issue}22800[#22800], {issue}22856[#22856])
-* Remove auto creation of container for azure repository {pull}22858[#22858] (issue: {issue}22857[#22857])
-
-Plugin Repository GCS::
-* GCS Repository: Remove specifying credential file on disk {pull}24727[#24727]
-
-Plugin Repository S3::
-* S3 Repository: Cleanup deprecated settings {pull}24097[#24097]
-* S3 Repository: Remove region setting {pull}22853[#22853] (issue: {issue}22758[#22758])
-* S3 Repository: Remove bucket auto create {pull}22846[#22846] (issue: {issue}22761[#22761])
-* S3 Repository: Remove env var and sysprop credentials support {pull}22842[#22842]
-* Remove deprecated S3 settings {pull}24445[#24445]
-
-Plugins::
-* Make plugin loading stricter {pull}25405[#25405]
-
-Query DSL::
-* Remove deprecated `type` and `slop` field in `match` query {pull}26720[#26720]
-* Remove several parse field deprecations in query builders {pull}26711[#26711]
-* Remove deprecated parameters from `ids_query` {pull}26508[#26508]
-* Refactor QueryStringQuery for 6.0 {pull}25646[#25646] (issue: {issue}25574[#25574])
-* Change `split_on_whitespace` default to false {pull}25570[#25570] (issue: {issue}25470[#25470])
-* Remove deprecated template query {pull}24577[#24577] (issue: {issue}19390[#19390])
-* Throw exception in scroll requests using `from` {pull}26235[#26235] (issue: {issue}9373[#9373])
-* Remove deprecated `minimum_number_should_match` in BoolQueryBuilder {pull}22416[#22416]
-* Remove support for empty queries {pull}22092[#22092] (issue: {issue}17624[#17624])
-* Remove deprecated query names: in, geo_bbox, mlt, fuzzy_match and match_fuzzy {pull}21852[#21852]
-* The `terms` query should always map to a Lucene `TermsQuery`. {pull}21786[#21786]
-* Be strict when parsing values searching for booleans {pull}21555[#21555] (issue: {issue}21545[#21545])
-* Remove collect payloads parameter {pull}20385[#20385]
-
-REST::
-* IndexClosedException to return 400 rather than 403 {pull}25752[#25752]
-* Remove comma-separated feature parsing for GetIndicesAction {pull}24723[#24723] (issue: {issue}24437[#24437])
-* Improve REST error handling when endpoint does not support HTTP verb, add OPTIONS support {pull}24437[#24437] (issues: {issue}0[#0], {issue}15335[#15335], {issue}17916[#17916])
-* Remove ldjson support and document ndjson for bulk/msearch {pull}23049[#23049] (issue: {issue}23025[#23025])
-* Enable strict duplicate checks for all XContent types {pull}22225[#22225] (issues: {issue}19614[#19614], {issue}22073[#22073])
-* Enable strict duplicate checks for JSON content {pull}22073[#22073] (issue: {issue}19614[#19614])
-* Remove lenient stats parsing {pull}21417[#21417] (issues: {issue}20722[#20722], {issue}21410[#21410])
-* Remove allow unquoted JSON {pull}20388[#20388] (issues: {issue}17674[#17674], {issue}17801[#17801])
-* Remove FORCE version_type {pull}20377[#20377] (issue: {issue}19769[#19769])
-
-Scripting::
-* remove lang url parameter from stored script requests {pull}25779[#25779] (issue: {issue}22887[#22887])
-* Disallow lang to be used with Stored Scripts {pull}25610[#25610]
-* Remove Deprecated Script Settings {pull}24756[#24756] (issue: {issue}24532[#24532])
-* Scripting: Remove native scripts {pull}24726[#24726] (issue: {issue}19966[#19966])
-* Scripting: Remove file scripts {pull}24627[#24627] (issue: {issue}21798[#21798])
-* Make dates be ReadableDateTimes in scripts {pull}22948[#22948] (issue: {issue}22875[#22875])
-* Remove groovy scripting language {pull}21607[#21607]
-* Remove script access to term statistics {pull}19462[#19462] (issue: {issue}19359[#19359])
-
-Search::
-* Make `index` in TermsLookup mandatory {pull}25753[#25753] (issue: {issue}25750[#25750])
-* Removes FieldStats API {pull}25628[#25628] (issue: {issue}25577[#25577])
-* Remove deprecated fielddata_fields from search request {pull}25566[#25566] (issue: {issue}25537[#25537])
-* Removes deprecated fielddata_fields {pull}25537[#25537] (issue: {issue}19027[#19027])
-* ProfileResult and CollectorResult should print machine readable timing information {pull}22561[#22561]
-* Remove indices query {pull}21837[#21837] (issue: {issue}17710[#17710])
-* Remove ignored type parameter in search_shards api {pull}21688[#21688]
-
-Sequence IDs::
-* Change certain replica failures not to fail the replica shard {pull}22874[#22874] (issue: {issue}10708[#10708])
-
-Settings::
-* Settings: Remove shared setting property {pull}24728[#24728]
-* Settings: Remove support for yaml and json config files {pull}24664[#24664] (issue: {issue}19391[#19391])
-
-Shadow Replicas::
-* Remove shadow replicas {pull}23906[#23906] (issue: {issue}22024[#22024])
-
-Similarities::
-* Similarity should accept dynamic settings when possible {pull}20339[#20339] (issue: {issue}6727[#6727])
-
-
-
-[[breaking-java-6.0.0]]
-[float]
-=== Breaking Java changes
-
-Aggregations::
-* Remove the unused SignificantTerms.compareTerm() method {pull}24714[#24714]
-* Make SignificantTerms.Bucket an interface rather than an abstract class {pull}24670[#24670] (issue: {issue}24492[#24492])
-* Fix NPE when `values` is omitted on percentile_ranks agg {pull}26046[#26046]
-* Make Terms.Bucket an interface rather than an abstract class {pull}24492[#24492]
-* Compound order for histogram aggregations {pull}22343[#22343] (issues: {issue}14771[#14771], {issue}20003[#20003], {issue}23613[#23613])
-
-Internal::
-* Collapses package structure for some bucket aggs {pull}25579[#25579] (issue: {issue}22868[#22868])
-
-Java API::
-* Remove deprecated IdsQueryBuilder ctor {pull}25529[#25529]
-* Removing unneeded getTookInMillis method {pull}23923[#23923]
-* Java api: ActionRequestBuilder#execute to return a PlainActionFuture {pull}24415[#24415] (issues: {issue}24412[#24412], {issue}9201[#9201])
-
-Java High Level REST Client::
-* Unify the result interfaces from get and search in Java client {pull}25361[#25361] (issue: {issue}16440[#16440])
-* Allow RestHighLevelClient to use plugins {pull}25024[#25024]
-
-Java REST Client::
-* Rename client artifacts {pull}25693[#25693] (issue: {issue}20248[#20248])
-
-Network::
-* Simplify TransportAddress {pull}20798[#20798]
-
-Plugin Delete By Query::
-* Move DeleteByQuery and Reindex requests into core {pull}24578[#24578]
-
-Plugins::
-* Drop name from TokenizerFactory {pull}24869[#24869]
-
-Query DSL::
-* Remove QueryParseContext {pull}25486[#25486]
-* Remove QueryParseContext from parsing QueryBuilders {pull}25448[#25448]
-
-REST::
-* Return index name and empty map for `/{index}/_alias` with no aliases {pull}25114[#25114] (issues: {issue}24723[#24723], {issue}25090[#25090])
-
-
-
-[[deprecation-6.0.0]]
-[float]
-=== Deprecations
-
-Index APIs::
-* Deprecated use of + in index expressions {pull}24585[#24585] (issue: {issue}24515[#24515])
-
-Index Templates::
-* Restore deprecation warning for invalid match_mapping_type values {pull}22304[#22304]
-
-Indexed Scripts/Templates::
-* Scripting: Deprecate stored search template apis {pull}25437[#25437] (issue: {issue}24596[#24596])
-
-Internal::
-* Deprecate XContentType auto detection methods in XContentFactory {pull}22181[#22181] (issue: {issue}19388[#19388])
-
-Percolator::
-* Deprecate percolate query's document_type parameter. {pull}25199[#25199]
-
-Plugins::
-* Plugins: Add backcompat for sha1 checksums {pull}26748[#26748] (issue: {issue}26746[#26746])
-
-Scripting::
-* Scripting: Change keys for inline/stored scripts to source/id {pull}25127[#25127]
-* Scripting: Deprecate native scripts {pull}24692[#24692] (issue: {issue}19966[#19966])
-* Scripting: Deprecate index lookup {pull}24691[#24691] (issue: {issue}19359[#19359])
-* Deprecate Fine Grain Settings for Scripts {pull}24573[#24573] (issue: {issue}24532[#24532])
-* Scripting: Deprecate file script settings {pull}24555[#24555] (issue: {issue}21798[#21798])
-* Scripting: Deprecate file scripts {pull}24552[#24552] (issue: {issue}21798[#21798])
-
-Settings::
-* Settings: Update settings deprecation from yml to yaml {pull}24663[#24663] (issue: {issue}19391[#19391])
-* Deprecate settings in .yml and .json {pull}24059[#24059] (issue: {issue}19391[#19391])
-
-Tribe Node::
-* Deprecate tribe service {pull}24598[#24598] (issue: {issue}24581[#24581])
-
-
-
-[[feature-6.0.0]]
-[float]
-=== New features
-
-Aggregations::
-* SignificantText aggregation - like significant_terms, but for text {pull}24432[#24432] (issue: {issue}23674[#23674])
-
-Analysis::
-* Expose simplepattern and simplepatternsplit tokenizers {pull}25159[#25159] (issue: {issue}23363[#23363])
-* Parse synonyms with the same analysis chain {pull}8049[#8049] (issue: {issue}7199[#7199])
-
-Core::
-* Enable index-time sorting {pull}24055[#24055] (issue: {issue}6720[#6720])
-
-Internal::
-* Automatically adjust search threadpool queue_size {pull}23884[#23884] (issue: {issue}3890[#3890])
-
-Mapping::
-* Add new ip_range field type {pull}24433[#24433]
-
-Parent/Child::
-* Move parent_id query to the parent-join module {pull}25072[#25072] (issue: {issue}20257[#20257])
-* Introduce ParentJoinFieldMapper, a field mapper that creates parent/child relation within documents of the same index {pull}24978[#24978] (issue: {issue}20257[#20257])
-
-Plugin Analysis ICU::
-* Add ICUCollationFieldMapper {pull}24126[#24126]
-
-Search::
-* Automatically early terminate search query based on index sorting {pull}24864[#24864] (issue: {issue}6720[#6720])
-
-Sequence IDs::
-* Add a scheduled translog retention check {pull}25622[#25622] (issues: {issue}10708[#10708], {issue}25294[#25294])
-* Initialize sequence numbers on a shrunken index {pull}25321[#25321] (issue: {issue}10708[#10708])
-* Initialize primary term for shrunk indices {pull}25307[#25307] (issue: {issue}10708[#10708])
-* Introduce translog size and age based retention policies {pull}25147[#25147] (issue: {issue}10708[#10708])
-
-Stats::
-* Adds nodes usage API to monitor usages of actions {pull}24169[#24169]
-
-Task Manager::
-* Task Management [ISSUE] {pull}15117[#15117]
-
-Upgrade API::
-* TemplateUpgraders should be called during rolling restart {pull}25263[#25263] (issues: {issue}24379[#24379], {issue}24680[#24680])
-
-
-
-[[enhancement-6.0.0]]
-[float]
-=== Enhancements
-
-Aggregations::
-* Add strict parsing of aggregation ranges {pull}25769[#25769]
-* Adds rewrite phase to aggregations {pull}25495[#25495] (issue: {issue}17676[#17676])
-* Tweak AggregatorBase.addRequestCircuitBreakerBytes {pull}25162[#25162] (issue: {issue}24511[#24511])
-* Add superset size to Significant Term REST response {pull}24865[#24865]
-* Add document count to Matrix Stats aggregation response {pull}24776[#24776]
-* Adds an implementation of LogLogBeta for the cardinality aggregation {pull}22323[#22323] (issue: {issue}22230[#22230])
-* Support distance units in GeoHashGrid aggregation precision {pull}26291[#26291] (issue: {issue}5042[#5042])
-* Reject multiple methods in `percentiles` aggregation {pull}26163[#26163] (issue: {issue}26095[#26095])
-* Use `global_ordinals_hash` execution mode when sorting by sub aggregations. {pull}26014[#26014] (issue: {issue}24359[#24359])
-* Add a specialized deferring collector for terms aggregator {pull}25190[#25190]
-* Agg builder accessibility fixes {pull}24323[#24323]
-* Remove support for the include/pattern syntax. {pull}23141[#23141] (issue: {issue}22933[#22933])
-* Promote longs to doubles when a terms agg mixes decimal and non-decimal numbers {pull}22449[#22449] (issue: {issue}22232[#22232])
-
-Allocation::
-* Adjust status on bad allocation explain requests {pull}25503[#25503] (issue: {issue}25458[#25458])
-* Promote replica on the highest version node {pull}25277[#25277] (issue: {issue}10708[#10708])
-
-Analysis::
-* [Analysis] Support normalizer in request param {pull}24767[#24767] (issue: {issue}23347[#23347])
-* Enforce validation for PathHierarchy tokenizer {pull}23510[#23510]
-* [analysis-icu] Allow setting unicodeSetFilter {pull}20814[#20814] (issue: {issue}20820[#20820])
-* Match- and MultiMatchQueryBuilder should only allow setting analyzer on string values {pull}23684[#23684] (issue: {issue}21665[#21665])
-
-Bulk::
-* Simplify bulk request execution {pull}20109[#20109]
-
-CAT API::
-* expand `/_cat/nodes` to return information about hard drive {pull}21775[#21775] (issue: {issue}21679[#21679])
-
-CRUD::
-* Added validation for upsert request {pull}24282[#24282] (issue: {issue}16671[#16671])
-
-Circuit Breakers::
-* ScriptService: Replace max compilation per minute setting with max compilation rate {pull}26399[#26399]
-
-Cluster::
-* Validate a joining node's version with version of existing cluster nodes {pull}25808[#25808]
-* Switch indices read-only if a node runs out of disk space {pull}25541[#25541] (issue: {issue}24299[#24299])
-* Add a cluster block that allows to delete indices that are read-only {pull}24678[#24678]
-* Separate publishing from applying cluster states {pull}24236[#24236]
-* Adds cluster state size to /_cluster/state response {pull}23440[#23440] (issue: {issue}3415[#3415])
-
-Core::
-* Allow `InputStreamStreamInput` array size validation where applicable {pull}26692[#26692]
-* Refactor bootstrap check results and error messages {pull}26637[#26637]
-* Add BootstrapContext to expose settings and recovered state to bootstrap checks {pull}26628[#26628]
-* Unit testable index creation task on MetaDataCreateIndexService {pull}25961[#25961]
-* Ignore .DS_Store files on macOS {pull}27108[#27108] (issue: {issue}23982[#23982])
-* Add max file size bootstrap check {pull}25974[#25974]
-* Add compatibility versions to main action response {pull}25799[#25799]
-* Index ids in binary form. {pull}25352[#25352] (issues: {issue}18154[#18154], {issue}24615[#24615])
-* Explicitly reject duplicate data paths {pull}25178[#25178]
-* Use SPI in High Level Rest Client to load XContent parsers {pull}25097[#25097]
-* Upgrade to lucene-7.0.0-snapshot-a0aef2f {pull}24775[#24775]
-* Speed up PK lookups at index time. {pull}19856[#19856]
-* Use Java 9 FilePermission model {pull}26302[#26302] (issue: {issue}21534[#21534])
-* Add friendlier message on bad keystore permissions {pull}26284[#26284]
-* Epoch millis and second formats accept float implicitly {pull}26119[#26119] (issue: {issue}14641[#14641])
-* Remove connect SocketPermissions from core {pull}22797[#22797]
-* Add repository-url module and move URLRepository {pull}22752[#22752] (issue: {issue}22116[#22116])
-* Remove accept SocketPermissions from core {pull}22622[#22622] (issue: {issue}22116[#22116])
-* Move IfConfig.logIfNecessary call into bootstrap {pull}22455[#22455] (issue: {issue}22116[#22116])
-* Remove artificial default processors limit {pull}20874[#20874] (issue: {issue}20828[#20828])
-* Simplify write failure handling {pull}19105[#19105] (issue: {issue}20109[#20109])
-* Improve bootstrap checks error messages {pull}24548[#24548]
-
-Discovery::
-* Allow plugins to validate cluster-state on join {pull}26595[#26595]
-
-Engine::
-* Add refresh stats tracking for realtime get {pull}25052[#25052] (issue: {issue}24806[#24806])
-* Introducing a translog deletion policy {pull}24950[#24950]
-* Fill missing sequence IDs up to max sequence ID when recovering from store {pull}24238[#24238] (issue: {issue}10708[#10708])
-* Use sequence numbers to identify out of order delivery in replicas & recovery {pull}24060[#24060] (issue: {issue}10708[#10708])
-* Add replica ops with version conflict to translog {pull}22626[#22626]
-* Clarify global checkpoint recovery {pull}21934[#21934] (issue: {issue}21254[#21254])
-* Move the IndexDeletionPolicy to be engine internal {pull}24930[#24930] (issue: {issue}10708[#10708])
-
-Exceptions::
-* IllegalStateException: Only duplicated jar instead of classpath {pull}24953[#24953]
-
-Highlighting::
-* Picks offset source for the unified highlighter directly from the es mapping {pull}25747[#25747] (issue: {issue}25699[#25699])
-
-Index APIs::
-* Let primary own its replication group {pull}25692[#25692] (issue: {issue}25485[#25485])
-* Create index request should return the index name {pull}25139[#25139] (issue: {issue}23044[#23044])
-
-Index Templates::
-* Fix error message for a put index template request without index_patterns {pull}27102[#27102] (issue: {issue}27100[#27100])
-
-Ingest::
-* Add Ingest-Processor specific Rest Endpoints & Add Grok endpoint {pull}25059[#25059] (issue: {issue}24725[#24725])
-* Port support for commercial GeoIP2 databases from Logstash. {pull}24889[#24889]
-* add `exclude_keys` option to KeyValueProcessor {pull}24876[#24876] (issue: {issue}23856[#23856])
-* Allow removing multiple fields in ingest processor {pull}24750[#24750] (issue: {issue}24622[#24622])
-* Add target_field parameter to ingest processors {pull}24133[#24133] (issues: {issue}23228[#23228], {issue}23682[#23682])
-
-Inner Hits::
-* Reuse inner hit query weight {pull}24571[#24571] (issue: {issue}23917[#23917])
-
-Internal::
-* TemplateUpgradeService should only run on the master {pull}27294[#27294]
-* Cleanup IndexFieldData visibility {pull}25900[#25900]
-* Bump the min compat version to 5.6.0 {pull}25805[#25805]
-* "shard started" should show index and shard ID {pull}25157[#25157]
-* Break out clear scroll logic from TransportClearScrollAction {pull}25125[#25125] (issue: {issue}25094[#25094])
-* Add helper methods to TransportActionProxy to identify proxy actions and requests {pull}25124[#25124]
-* Add remote cluster infrastructure to fetch discovery nodes. {pull}25123[#25123] (issue: {issue}25094[#25094])
-* Add the ability to set eager_global_ordinals in the new parent-join field {pull}25019[#25019]
-* Disallow multiple parent-join fields per mapping {pull}25002[#25002]
-* Remove the need for _UNRELEASED suffix in versions {pull}24798[#24798] (issue: {issue}24768[#24768])
-* Optimize the order of bytes in uuids for better compression. {pull}24615[#24615] (issue: {issue}18209[#18209])
-* Prevent cluster internal `ClusterState.Custom` impls to leak to a client {pull}26232[#26232]
-* Use holder pattern for lazy deprecation loggers {pull}26218[#26218] (issue: {issue}26210[#26210])
-* Allow `ClusterState.Custom` to be created on initial cluster states {pull}26144[#26144]
-* Try to convince the JVM not to lose stacktraces {pull}24426[#24426] (issue: {issue}24376[#24376])
-* Make document write requests immutable {pull}23038[#23038]
-* Add assertions enabled helper {pull}24834[#24834]
-
-Java API::
-* Always Accumulate Transport Exceptions {pull}25017[#25017] (issue: {issue}23099[#23099])
-
-Java High Level REST Client::
-* [DOCS] restructure java clients docs pages {pull}25517[#25517]
-* Use SPI in High Level Rest Client to load XContent parsers {pull}25098[#25098] (issues: {issue}25024[#25024], {issue}25097[#25097])
-* Add support for clear scroll to high level REST client {pull}25038[#25038]
-* Add search scroll method to high level REST client {pull}24938[#24938] (issue: {issue}23331[#23331])
-* Add search method to high level REST client {pull}24796[#24796] (issues: {issue}24794[#24794], {issue}24795[#24795])
-* Make RestHighLevelClient Closeable and simplify its creation {pull}26180[#26180] (issue: {issue}26086[#26086])
-* Add info method to High Level Rest client {pull}23350[#23350]
-* Add support for named xcontent parsers to high level REST client {pull}23328[#23328]
-* Add BulkRequest support to High Level Rest client {pull}23312[#23312]
-* Add UpdateRequest support to High Level Rest client {pull}23266[#23266]
-* Add delete API to the High Level Rest Client {pull}23187[#23187]
-* Add Index API to High Level Rest Client {pull}23040[#23040]
-* Add get/exists method to RestHighLevelClient {pull}22706[#22706]
-* Add fromxcontent methods to delete response {pull}22680[#22680] (issue: {issue}22229[#22229])
-* Add REST high level client gradle submodule and first simple method {pull}22371[#22371]
-* Add doc_count to ParsedMatrixStats {pull}24952[#24952] (issue: {issue}24776[#24776])
-* Add fromXContent method to ClearScrollResponse {pull}24909[#24909]
-* ClearScrollRequest to implement ToXContentObject {pull}24907[#24907]
-* SearchScrollRequest to implement ToXContentObject {pull}24906[#24906] (issue: {issue}3889[#3889])
-* Add aggs parsers for high level REST Client {pull}24824[#24824] (issues: {issue}23965[#23965], {issue}23973[#23973], {issue}23974[#23974], {issue}24085[#24085], {issue}24160[#24160], {issue}24162[#24162], {issue}24182[#24182], {issue}24183[#24183], {issue}24208[#24208], {issue}24213[#24213], {issue}24239[#24239], {issue}24284[#24284], {issue}24312[#24312], {issue}24330[#24330], {issue}24365[#24365], {issue}24371[#24371], {issue}24442[#24442], {issue}24521[#24521], {issue}24524[#24524], {issue}24564[#24564], {issue}24583[#24583], {issue}24589[#24589], {issue}24648[#24648], {issue}24667[#24667], {issue}24675[#24675], {issue}24682[#24682], {issue}24700[#24700], {issue}24706[#24706], {issue}24717[#24717], {issue}24720[#24720], {issue}24738[#24738], {issue}24746[#24746], {issue}24789[#24789], {issue}24791[#24791],
{issue}24794[#24794], {issue}24796[#24796], {issue}24822[#24822]) - -Java REST Client:: -* Shade external dependencies in the rest client jar {pull}25780[#25780] (issue: {issue}25208[#25208]) -* RestClient uses system properties and system default SSLContext {pull}25757[#25757] (issue: {issue}23231[#23231]) -* Wrap rest httpclient with doPrivileged blocks {pull}22603[#22603] (issue: {issue}22116[#22116]) - -Logging:: -* Prevent excessive disk consumption by log files {pull}25660[#25660] -* Use LRU set to reduce repeat deprecation messages {pull}25474[#25474] (issue: {issue}25457[#25457]) - -Mapping:: -* More efficient encoding of range fields. {pull}26470[#26470] (issue: {issue}26443[#26443]) -* Don't detect source's XContentType in DocumentParser.parseDocument() {pull}26880[#26880] -* Better validation of `copy_to`. {pull}25983[#25983] -* Optimize `terms` queries on `ip` addresses to use a `PointInSetQuery` whenever possible. {pull}25669[#25669] (issue: {issue}25667[#25667]) -* Loosen the restrictions on disabling _all in 6.x {pull}26259[#26259] -* Date detection should not rely on a hardcoded set of characters. {pull}22171[#22171] (issue: {issue}1694[#1694]) -* Identify documents by their `_id`. {pull}24460[#24460] - -Network:: -* Add additional low-level logging handler {pull}26887[#26887] -* Unwrap causes when maybe dying {pull}26884[#26884] -* Move TransportStats accounting into TcpTransport {pull}25251[#25251] -* Simplify connection closing and cleanups in TcpTransport {pull}25250[#25250] -* Disable the Netty recycler in the client {pull}24793[#24793] (issues: {issue}22452[#22452], {issue}24721[#24721]) -* Remove Netty logging hack {pull}24653[#24653] (issues: {issue}24469[#24469], {issue}5624[#5624], {issue}6568[#6568], {issue}6696[#6696]) -* Isolate SocketPermissions to Netty {pull}23057[#23057] -* Wrap netty accept/connect ops with doPrivileged {pull}22572[#22572] (issue: {issue}22116[#22116]) -* Replace Socket, ServerSocket, and HttpServer usages in tests with mocksocket versions {pull}22287[#22287] (issue: {issue}22116[#22116]) - -Packaging:: -* Remove memlock suggestion from systemd service {pull}25979[#25979] -* Set address space limit in systemd service file {pull}25975[#25975] -* Version option should display if snapshot {pull}25970[#25970] -* Ignore JVM options before checking Java version {pull}25969[#25969] -* Also skip JAVA_TOOL_OPTIONS on Windows {pull}25968[#25968] -* Introduce elasticsearch-env for Windows {pull}25958[#25958] -* Introduce elasticsearch-env {pull}25815[#25815] (issue: {issue}20286[#20286]) -* Stop exporting HOSTNAME from scripts {pull}25807[#25807] -* Set number of processes in systemd unit file {pull}24970[#24970] (issue: {issue}20874[#20874]) - -Parent/Child:: -* Remove ParentJoinFieldSubFetchPhase {pull}25550[#25550] (issue: {issue}25363[#25363]) -* Support parent id being specified as number in the _source {pull}25547[#25547] - -Percolator:: -* Store the QueryBuilder's Writable representation instead of its XContent representation {pull}25456[#25456] -* Add support for selecting percolator query candidate matches containing wildcard / prefix queries {pull}25351[#25351] - -Plugin Discovery EC2:: -* Read ec2 discovery address from aws instance tags {pull}22743[#22743] (issue: {issue}22566[#22566]) - -Plugin Lang Painless:: -* Allow Custom Whitelists in Painless {pull}25557[#25557] -* Update Painless to Allow Augmentation from Any Class {pull}25360[#25360] -* Add Needs Methods to Painless Script Context Factories {pull}25267[#25267] -* Support Script 
Context Stateful Factory in Painless {pull}25233[#25233] -* Generate Painless Factory for Creating Script Instances {pull}25120[#25120] -* Update Painless to Use New Script Contexts {pull}25015[#25015] -* Optimize instance creation in LambdaBootstrap {pull}24618[#24618] -* Make Painless Compiler Use an Instance Per Context {pull}24972[#24972] -* Make PainlessScript An Interface {pull}24966[#24966] - -Plugin Repository GCS:: -* GCS Repository: Add secure storage of credentials {pull}24697[#24697] - -Plugin Repository HDFS:: -* Add permission checks before reading from HDFS stream {pull}26716[#26716] (issue: {issue}26714[#26714]) -* Add doPrivilege blocks for socket connect ops in repository-hdfs {pull}22793[#22793] (issue: {issue}22116[#22116]) -* Add Kerberos support for Repo HDFS plugin [ISSUE] {pull}21990[#21990] - -Plugin Repository S3:: -* S3 Repository: Add back repository level credentials {pull}24609[#24609] - -Plugins:: -* Adjust SHA-512 supported format on plugin install {pull}27093[#27093] -* Move tribe to a module {pull}25778[#25778] -* Plugins can register pre-configured char filters {pull}25000[#25000] (issue: {issue}23658[#23658]) -* Add purge option to remove plugin CLI {pull}24981[#24981] -* Allow plugins to register pre-configured tokenizers {pull}24751[#24751] (issues: {issue}24223[#24223], {issue}24572[#24572]) -* Move ReindexAction class to core {pull}24684[#24684] (issue: {issue}24578[#24578]) -* Make PreConfiguredTokenFilter harder to misuse {pull}24572[#24572] (issue: {issue}23658[#23658]) -* Plugins: Remove leniency for missing plugins dir {pull}24173[#24173] -* Add doPrivilege blocks for socket connect operations in plugins {pull}22534[#22534] (issue: {issue}22116[#22116]) - -Query DSL:: -* Make slop optional when parsing `span_near` query {pull}25677[#25677] (issue: {issue}25642[#25642]) -* Require a field when a `seed` is provided to the `random_score` function. 
{pull}25594[#25594] (issue: {issue}25240[#25240]) -* Add support for auto_generate_synonyms_phrase_query in match_query, multi_match_query, query_string and simple_query_string {pull}23147[#23147] - -REST:: -* Cat shards bytes {pull}26952[#26952] -* Refactor PathTrie and RestController to use a single trie for all methods {pull}25459[#25459] (issue: {issue}24437[#24437]) -* Make ObjectParser support string to boolean conversion {pull}24668[#24668] (issue: {issue}21802[#21802]) - -Recovery:: -* Introduce a History UUID as a requirement for ops based recovery {pull}26577[#26577] (issue: {issue}10708[#10708]) -* Goodbye, Translog Views {pull}25962[#25962] -* Disallow multiple concurrent recovery attempts for same target shard {pull}25428[#25428] -* Live primary-replica resync (no rollback) {pull}24841[#24841] (issue: {issue}10708[#10708]) -* Peer Recovery: remove maxUnsafeAutoIdTimestamp hand off {pull}24243[#24243] (issue: {issue}24149[#24149]) -* Introduce sequence-number-based recovery {pull}22484[#22484] (issue: {issue}10708[#10708]) - -Scripting:: -* Scripting: Rename SearchScript.needsScores to needs_score {pull}25235[#25235] -* Scripting: Add optional context parameter to put stored script requests {pull}25014[#25014] -* Add New Security Script Settings {pull}24637[#24637] (issue: {issue}24532[#24532]) -* Add StatefulFactoryType as optional intermediate factory in script contexts {pull}24974[#24974] (issue: {issue}20426[#20426]) -* Make contexts available to ScriptEngine construction {pull}24896[#24896] -* Make ScriptEngine.compile generic on the script context {pull}24873[#24873] -* Add instance and compiled classes to script contexts {pull}24868[#24868] - -Search:: -* Add soft limit on allowed number of script fields in request {pull}26598[#26598] (issue: {issue}26390[#26390]) -* Add a soft limit for the number of requested doc-value fields {pull}26574[#26574] (issue: {issue}26390[#26390]) -* Rewrite search requests on the coordinating nodes {pull}25814[#25814] (issue: {issue}25791[#25791]) -* Ensure query resources are fetched asynchronously during rewrite {pull}25791[#25791] -* Introduce a new Rewriteable interface to streamline rewriting {pull}25788[#25788] -* Reduce the scope of `QueryRewriteContext` {pull}25787[#25787] -* Reduce the overhead of timeouts and low-level search cancellation. {pull}25776[#25776] -* Reduce profiling overhead. {pull}25772[#25772] (issue: {issue}24799[#24799]) -* Prevent `can_match` requests from sending to incompatible nodes {pull}25705[#25705] (issue: {issue}25704[#25704]) -* Add a shard filter search phase to pre-filter shards based on query rewriting {pull}25658[#25658] -* Ensure we rewrite common queries to `match_none` if possible {pull}25650[#25650] -* Limit the number of concurrent shard requests per search request {pull}25632[#25632] -* Add cluster name validation to RemoteClusterConnection {pull}25568[#25568] -* Speed up sorted scroll when the index sort matches the search sort {pull}25138[#25138] (issue: {issue}6720[#6720]) -* Leverage scorerSupplier when applicable. 
{pull}25109[#25109] -* Add Cross Cluster Search support for scroll searches {pull}25094[#25094] -* Track EWMA[1] of task execution time in search threadpool executor {pull}24989[#24989] (issue: {issue}24915[#24915]) -* Query range fields by doc values when they are expected to be more efficient than points {pull}24823[#24823] (issue: {issue}24314[#24314]) -* Search: Fairer balancing when routing searches by session ID {pull}24671[#24671] (issue: {issue}24642[#24642]) -* Add parsing from xContent to Suggest {pull}22903[#22903] -* Add parsing from xContent to ShardSearchFailure {pull}22699[#22699] -* Eliminate array access in tight loops when profiling is enabled. {pull}24959[#24959] -* Support Multiple Inner Hits on a Field Collapse Request {pull}24517[#24517] -* Expand cross cluster search indices for search requests to the concrete index or to its aliases {pull}24502[#24502] - -Search Templates:: -* Add max concurrent searches to multi template search {pull}24255[#24255] (issues: {issue}20912[#20912], {issue}21907[#21907]) - -Sequence IDs:: -* Roll translog generation on primary promotion {pull}27313[#27313] -* Restoring from snapshot should force generation of a new history uuid {pull}26694[#26694] (issues: {issue}10708[#10708], {issue}26544[#26544], {issue}26557[#26557], {issue}26577[#26577]) -* Add global checkpoint tracking on the primary {pull}26666[#26666] (issue: {issue}26591[#26591]) -* Introduce global checkpoint background sync {pull}26591[#26591] (issues: {issue}26573[#26573], {issue}26630[#26630], {issue}26666[#26666]) -* Move `UNASSIGNED_SEQ_NO` and `NO_OPS_PERFORMED` to `SequenceNumbers` {pull}26494[#26494] (issue: {issue}10708[#10708]) -* Move primary term from ReplicationRequest to ConcreteShardRequest {pull}25822[#25822] -* Add reason to global checkpoint updates on replica {pull}25612[#25612] (issue: {issue}10708[#10708]) -* Introduce primary/replica mode for GlobalCheckPointTracker {pull}25468[#25468] -* Throw back replica local checkpoint on new primary {pull}25452[#25452] (issues: {issue}10708[#10708], {issue}25355[#25355]) -* Update global checkpoint when increasing primary term on replica {pull}25422[#25422] (issues: {issue}10708[#10708], {issue}25355[#25355]) -* Enable a long translog retention policy by default {pull}25294[#25294] (issues: {issue}10708[#10708], {issue}25147[#25147]) -* Introduce primary context {pull}25122[#25122] (issues: {issue}10708[#10708], {issue}25355[#25355]) -* Block older operations on primary term transition {pull}24779[#24779] (issue: {issue}10708[#10708]) -* Block global checkpoint advances when recovering {pull}24404[#24404] (issue: {issue}10708[#10708]) -* Add primary term to doc write response {pull}24171[#24171] (issue: {issue}10708[#10708]) -* Preserve multiple translog generations {pull}24015[#24015] (issue: {issue}10708[#10708]) -* Introduce translog generation rolling {pull}23606[#23606] (issue: {issue}10708[#10708]) -* Replicate write failures {pull}23314[#23314] -* Introduce sequence-number-aware translog {pull}22822[#22822] (issue: {issue}10708[#10708]) -* Introduce translog no-op {pull}22291[#22291] (issue: {issue}10708[#10708]) -* Tighten sequence numbers recovery {pull}22212[#22212] (issue: {issue}10708[#10708]) -* Add BWC layer to seq no infra and enable BWC tests {pull}22185[#22185] (issue: {issue}21670[#21670]) -* Add internal _primary_term doc values field, fix _seq_no indexing {pull}21637[#21637] (issues: {issue}10708[#10708], {issue}21480[#21480]) -* Add global checkpoint to translog checkpoints {pull}21254[#21254] 
-* Sequence numbers commit data for Lucene uses Iterable interface {pull}20793[#20793] (issue: {issue}10708[#10708]) -* Simplify GlobalCheckpointService and properly hook it for cluster state updates {pull}20720[#20720] -* Fill gaps on primary promotion {pull}24945[#24945] (issue: {issue}10708[#10708]) -* Introduce clean transition on primary promotion {pull}24925[#24925] (issue: {issue}10708[#10708]) -* Guarantee that translog generations are seqNo conflict free {pull}24825[#24825] (issues: {issue}10708[#10708], {issue}24779[#24779]) -* Inline global checkpoints {pull}24513[#24513] (issue: {issue}10708[#10708]) - -Settings:: -* Add disk threshold settings validation {pull}25600[#25600] (issue: {issue}25560[#25560]) -* Enable cross-setting validation {pull}25560[#25560] (issue: {issue}25541[#25541]) -* Validate `transport.profiles.*` settings {pull}25508[#25508] -* Cleanup network / transport related settings {pull}25489[#25489] -* Emit settings deprecation logging at most once {pull}25457[#25457] -* IndexMetaData: Introduce internal format index setting {pull}25292[#25292] -* Persist created keystore on startup unless keystore is present {pull}26253[#26253] (issue: {issue}26126[#26126]) -* Settings: Add keystore.seed auto generated secure setting {pull}26149[#26149] -* Settings: Add keystore creation to add commands {pull}26126[#26126] - -Snapshot/Restore:: -* Fixed references to Multi Index Syntax {pull}27283[#27283] -* Improves snapshot logging and snapshot deletion error handling {pull}25264[#25264] -* Enhances get snapshots API to allow retrieving repository index only {pull}24477[#24477] (issue: {issue}24288[#24288]) - -Stats:: -* Update `IndexShard#refreshMetric` via a `ReferenceManager.RefreshListener` {pull}25083[#25083] (issues: {issue}24806[#24806], {issue}25052[#25052]) -* Expose disk usage estimates in nodes stats {pull}22081[#22081] (issue: {issue}8686[#8686]) - -Store:: -* Remove support for lucene versions without checksums {pull}24021[#24021] - -Suggesters:: -* Remove deprecated _suggest endpoint {pull}22203[#22203] (issue: {issue}20305[#20305]) - -Task Manager:: -* Add descriptions to bulk tasks {pull}22059[#22059] (issue: {issue}21768[#21768]) - -Translog:: -* Translog file recovery should not rely on lucene commits {pull}25005[#25005] (issue: {issue}24950[#24950]) - - - -[[bug-6.0.0]] -[float] -=== Bug fixes - -Aggregations:: -* Do not delegate a null scorer to LeafBucketCollectors {pull}26747[#26747] (issue: {issue}26611[#26611]) -* Create weights lazily in filter and filters aggregation {pull}26983[#26983] -* Fix IndexOutOfBoundsException in histograms for NaN doubles (#26787) {pull}26856[#26856] (issue: {issue}26787[#26787]) -* Scripted_metric _agg parameter disappears if params are provided {pull}19863[#19863] (issue: {issue}19768[#19768]) -* Fixes array out of bounds for value count agg {pull}26038[#26038] (issue: {issue}17379[#17379]) -* Aggregations bug: Significant_text fails on arrays of text. 
{pull}25030[#25030] (issue: {issue}25029[#25029]) -* Check bucket metric ages point to a multi bucket agg {pull}26215[#26215] (issue: {issue}25775[#25775]) -* Terms aggregation should remap global ordinal buckets when a sub-aggregator is used to sort the terms {pull}24941[#24941] (issue: {issue}24788[#24788]) -* Correctly set doc_count when MovAvg "predicts" values on existing buckets {pull}24892[#24892] (issue: {issue}24327[#24327]) -* DateHistogram: Fix `extended_bounds` with `offset` {pull}23789[#23789] (issue: {issue}23776[#23776]) -* Fix ArrayIndexOutOfBoundsException when no ranges are specified in the query {pull}23241[#23241] (issue: {issue}22881[#22881]) - -Aliases:: -* mget with an alias shouldn't ignore alias routing {pull}25697[#25697] (issue: {issue}25696[#25696]) -* GET aliases should 404 if aliases are missing {pull}25043[#25043] (issue: {issue}24644[#24644]) - -Allocation:: -* Fix DiskThresholdMonitor flood warning {pull}26204[#26204] (issue: {issue}26201[#26201]) -* Allow wildcards for shard IP filtering {pull}26187[#26187] (issues: {issue}22591[#22591], {issue}26184[#26184]) - -Analysis:: -* Pre-configured shingle filter should disable graph analysis {pull}25853[#25853] (issue: {issue}25555[#25555]) -* PatternAnalyzer should lowercase wildcard queries when `lowercase` is true. {pull}24967[#24967] - -CAT API:: -* Fix NPE for /_cat/indices when no primary shard {pull}26953[#26953] (issue: {issue}26942[#26942]) - -CRUD:: -* Serialize and expose timeout of acknowledged requests in REST layer {pull}26189[#26189] (issue: {issue}26213[#26213]) -* Fix silent loss of last command to _bulk and _msearch due to missing newline {pull}25740[#25740] (issue: {issue}7601[#7601]) - -Cache:: -* Reduce the default number of cached queries. {pull}26949[#26949] (issue: {issue}26938[#26938]) -* fix bug of weight computation {pull}24856[#24856] - -Circuit Breakers:: -* Checks the circuit breaker before allocating bytes for a new big array {pull}25010[#25010] (issue: {issue}24790[#24790]) - -Cluster:: -* Register setting `cluster.indices.tombstones.size` {pull}26193[#26193] (issue: {issue}26191[#26191]) - -Core:: -* Correctly encode warning headers {pull}27269[#27269] (issue: {issue}27244[#27244]) -* Fix cache compute if absent for expired entries {pull}26516[#26516] -* Timed runnable should delegate to abstract runnable {pull}27095[#27095] (issue: {issue}27069[#27069]) -* Stop invoking non-existent syscall {pull}27016[#27016] (issue: {issue}20179[#20179]) -* MetaData Builder doesn't properly prevent an alias with the same name as an index {pull}26804[#26804] -* Release operation permit on thread-pool rejection {pull}25930[#25930] (issue: {issue}25863[#25863]) -* Node should start up despite a lingering `.es_temp_file` {pull}21210[#21210] (issue: {issue}21007[#21007]) -* Fix cache expire after access {pull}24546[#24546] - -Dates:: -* Fix typo in date format {pull}26503[#26503] (issue: {issue}26500[#26500]) - -Discovery:: -* MasterNodeChangePredicate should use the node instance to detect master change {pull}25877[#25877] (issue: {issue}25471[#25471]) - -Engine:: -* Die with dignity while merging {pull}27265[#27265] (issue: {issue}19272[#19272]) -* Engine - do not index operations with seq# lower than the local checkpoint into lucene {pull}25827[#25827] (issues: {issue}1[#1], {issue}2[#2], {issue}25592[#25592]) - -Geo:: -* Fix typo in GeoUtils#isValidLongitude {pull}25121[#25121] - -Highlighting:: -* Fix percolator highlight sub fetch phase to not highlight query twice {pull}26622[#26622] -* 
FastVectorHighlighter should not cache the field query globally {pull}25197[#25197] (issue: {issue}25171[#25171]) -* Highlighters: Fix MultiPhrasePrefixQuery rewriting {pull}25103[#25103] (issue: {issue}25088[#25088]) -* Fix nested query highlighting {pull}26305[#26305] (issue: {issue}26230[#26230]) - -Index APIs:: -* Shrink API should ignore templates {pull}25380[#25380] (issue: {issue}25035[#25035]) -* Rollover max docs should only count primaries {pull}24977[#24977] (issue: {issue}24217[#24217]) -* Validates updated settings on closed indices {pull}24487[#24487] (issue: {issue}23787[#23787]) - -Ingest:: -* date processor should not fail if timestamp is specified as json number {pull}26986[#26986] (issue: {issue}26967[#26967]) -* date_index_name processor should not fail if timestamp is specified as json number {pull}26910[#26910] (issue: {issue}26890[#26890]) -* Sort Processor does not have proper behavior with targetField {pull}25237[#25237] (issue: {issue}24133[#24133]) -* fix grok's pattern parsing to validate pattern names in expression {pull}25063[#25063] (issue: {issue}22831[#22831]) -* Remove support for Visio and potm files {pull}22079[#22079] (issue: {issue}22077[#22077]) -* Fix floating-point error when DateProcessor parses UNIX {pull}24947[#24947] -* add option for _ingest.timestamp to use new ZonedDateTime (5.x backport) {pull}24030[#24030] (issues: {issue}23168[#23168], {issue}23174[#23174]) - -Inner Hits:: -* Do not allow inner hits that fetch _source and have a non nested object field as parent {pull}25749[#25749] (issue: {issue}25315[#25315]) -* When fetching nested inner hits only access stored fields when needed {pull}25864[#25864] (issue: {issue}6[#6]) -* If size / offset are out of bounds just do a plain count {pull}20556[#20556] (issue: {issue}20501[#20501]) -* Fix Source filtering in new field collapsing feature {pull}24068[#24068] (issue: {issue}24063[#24063]) - -Internal:: -* Bump version to 6.0.1 [OPEN] {pull}27386[#27386] -* `IndexShard.routingEntry` should only be updated once all internal state is ready {pull}26776[#26776] -* Catch exceptions and inform handler in RemoteClusterConnection#collectNodes {pull}26725[#26725] (issue: {issue}26700[#26700]) -* Internal: Add versionless alias for rest client codebase in policy files {pull}26521[#26521] -* Upgrade Lucene to version 7.0.1 {pull}26926[#26926] -* Fix BytesReferenceStreamInput#skip with offset {pull}25634[#25634] -* Fix race condition in RemoteClusterConnection node supplier {pull}25432[#25432] -* Initialise empty lists in BaseTaskResponse constructor {pull}25290[#25290] -* Extract a common base class for scroll executions {pull}24979[#24979] (issue: {issue}16555[#16555]) -* Obey lock order if working with store to get metadata snapshots {pull}24787[#24787] (issue: {issue}24481[#24481]) -* Fix Version based BWC and set correct minCompatVersion {pull}24732[#24732] -* Fix `_field_caps` serialization in order to support cross cluster search {pull}24722[#24722] -* Avoid race when shutting down controller processes {pull}24579[#24579] -* Fix handling of document failure exception in InternalEngine {pull}22718[#22718] -* Ensure remote cluster is connected before fetching `_field_caps` {pull}24845[#24845] (issue: {issue}24763[#24763]) - -Java API:: -* BulkProcessor flush runnable preserves the thread context from creation time {pull}26718[#26718] (issue: {issue}26596[#26596]) - -Java High Level REST Client:: -* Make RestHighLevelClient's Request class public {pull}26627[#26627] (issue: {issue}26455[#26455]) -* 
Forbid direct usage of ContentType.create() methods {pull}26457[#26457] (issues: {issue}22769[#22769], {issue}26438[#26438]) -* Make ShardSearchTarget optional when parsing ShardSearchFailure {pull}27078[#27078] (issue: {issue}27055[#27055]) - -Java REST Client:: -* Better message text for ResponseException {pull}26564[#26564] -* rest-client-sniffer: configurable threadfactory {pull}26897[#26897] - -Logging:: -* Allow not configuring logging without config {pull}26209[#26209] (issues: {issue}20575[#20575], {issue}24076[#24076]) - -Mapping:: -* Allow copying from a field to another field that belongs to the same nested object. {pull}26774[#26774] (issue: {issue}26763[#26763]) -* Fixed bug that mapper_parsing_exception is thrown for numeric field with ignore_malformed=true when inserting "NaN" {pull}25967[#25967] (issue: {issue}25289[#25289]) -* Coerce decimal strings for whole number types by truncating the decimal part {pull}25835[#25835] (issue: {issue}25819[#25819]) -* Fix parsing of ip range queries. {pull}25768[#25768] (issue: {issue}25636[#25636]) -* Disable date field mapping changing {pull}25285[#25285] (issue: {issue}25271[#25271]) -* Correctly enable _all for older 5.x indices {pull}25087[#25087] (issue: {issue}25068[#25068]) -* token_count datatype should handle null value {pull}25046[#25046] (issue: {issue}24928[#24928]) -* keep _parent field while updating child type mapping {pull}24407[#24407] (issue: {issue}23381[#23381]) -* ICUCollationKeywordFieldMapper use SortedSetDocValuesField {pull}26267[#26267] -* Fix serialization of the `_all` field. {pull}26143[#26143] (issue: {issue}26136[#26136]) - -More Like This:: -* Pass over _routing value with more_like_this items to be retrieved {pull}24679[#24679] (issue: {issue}23699[#23699]) - -NOT CLASSIFIED:: -* DocumentMissingException during Logstash scripted upsert [ISSUE] {pull}27148[#27148] -* An assertion trips when master opens an index from before 5.x [ISSUE] {pull}24809[#24809] - -Nested Docs:: -* In case of a single type the _id field should be added to the nested document instead of _uid field {pull}25149[#25149] -* Inner hits source filtering not working [ISSUE] {pull}23090[#23090] - -Network:: -* Fixed ByteBuf leaking in org.elasticsearch.http.netty4.Netty4HttpRequestHandler {pull}27222[#27222] (issues: {issue}3[#3], {issue}4[#4], {issue}5[#5], {issue}6[#6]) -* Check for closed connection while opening {pull}26932[#26932] -* Ensure pending transport handlers are invoked for all channel failures {pull}25150[#25150] -* Notify onConnectionClosed rather than onNodeDisconnect to prune transport handlers {pull}24639[#24639] (issues: {issue}24557[#24557], {issue}24575[#24575], {issue}24632[#24632]) -* Release pipelined http responses on close {pull}26226[#26226] -* Fix error message if an incompatible node connects {pull}24884[#24884] - -Packaging:: -* Fix handling of Windows paths containing parentheses {pull}26916[#26916] (issue: {issue}26454[#26454]) -* Exit Windows scripts promptly on failure {pull}25959[#25959] -* Pass config path as a system property {pull}25943[#25943] -* ES_HOME needs to be made absolute before attempt at traversal {pull}25865[#25865] -* Fix elasticsearch-keystore handling of path.conf {pull}25811[#25811] -* Stop disabling explicit GC {pull}25759[#25759] -* Avoid failing install if system-sysctl is masked {pull}25657[#25657] (issue: {issue}24234[#24234]) -* Get short path name for native controllers {pull}25344[#25344] -* When stopping via systemd only kill the JVM, not its control group {pull}25195[#25195] 
-* remove remaining references to scripts directory {pull}24771[#24771] -* Handle parentheses in batch file path {pull}24731[#24731] (issue: {issue}24712[#24712]) -* Detect modified keystore on package removal {pull}26300[#26300] -* Create keystore on RPM and Debian package install {pull}26282[#26282] -* Add safer empty variable checking for Windows {pull}26268[#26268] (issue: {issue}26261[#26261]) -* Export HOSTNAME environment variable {pull}26262[#26262] (issues: {issue}25807[#25807], {issue}26255[#26255]) -* Fix daemonization command status test {pull}26196[#26196] (issue: {issue}26080[#26080]) -* Set RuntimeDirectory in systemd service {pull}23526[#23526] - -Parent/Child:: -* The default _parent field should not try to load global ordinals {pull}25851[#25851] (issue: {issue}25849[#25849]) - -Percolator:: -* Also support query extraction for queries wrapped inside a ESToParentBlockJoinQuery {pull}26754[#26754] -* Fix range queries with date range based on current time in percolator queries. {pull}24666[#24666] (issue: {issue}23921[#23921]) - -Plugin Analysis Kuromoji:: -* Fix kuromoji default stoptags {pull}26600[#26600] (issue: {issue}26519[#26519]) - -Plugin Analysis Phonetic:: -* Fix beidermorse phonetic token filter for unspecified `languageset` {pull}27112[#27112] (issue: {issue}26771[#26771]) - -Plugin Discovery File:: -* Fix discovery-file plugin to use custom config path {pull}26662[#26662] (issue: {issue}26660[#26660]) - -Plugin Ingest Attachment:: -* Add missing mime4j library {pull}22764[#22764] (issue: {issue}22077[#22077]) - -Plugin Lang Painless:: -* Painless: allow doubles to be cast to longs. {pull}25936[#25936] - -Plugin Repository Azure:: -* Azure snapshots can not be restored anymore {pull}26778[#26778] (issues: {issue}22858[#22858], {issue}26751[#26751], {issue}26777[#26777]) -* Snapshot: azure module - accelerate the listing of files (used in delete snapshot) {pull}25710[#25710] (issue: {issue}25424[#25424]) -* Use Azure upload method instead of our own implementation {pull}26751[#26751] -* Make calls to CloudBlobContainer#exists privileged {pull}25937[#25937] (issue: {issue}25931[#25931]) - -Plugin Repository GCS:: -* Ensure that gcs client creation is privileged {pull}25938[#25938] (issue: {issue}25932[#25932]) - -Plugin Repository HDFS:: -* Add Log4j to SLF4J binding for repository-hdfs {pull}26514[#26514] (issue: {issue}26512[#26512]) -* Upgrading HDFS Repository Plugin to use HDFS 2.8.1 Client {pull}25497[#25497] (issue: {issue}25450[#25450]) - -Plugin Repository S3:: -* Avoid SecurityException in repository-S3 on DefaultS3OutputStream.flush() {pull}25254[#25254] (issue: {issue}25192[#25192]) -* Wrap getCredentials() in a doPrivileged() block {pull}23297[#23297] (issues: {issue}22534[#22534], {issue}23271[#23271]) - -Plugins:: -* X-Pack plugin download fails on Windows desktop [ISSUE] {pull}24570[#24570] -* Fix plugin installation permissions {pull}24527[#24527] (issue: {issue}24480[#24480]) - -Query DSL:: -* Fixed incomplete JSON body on count request making org.elasticsearch.rest.action.RestActions#parseTopLevelQueryBuilder go into endless loop {pull}26680[#26680] (issue: {issue}26083[#26083]) -* SpanNearQueryBuilder should return the inner clause when a single clause is provided {pull}25856[#25856] (issue: {issue}25630[#25630]) -* Refactor field expansion for match, multi_match and query_string query {pull}25726[#25726] (issues: {issue}25551[#25551], {issue}25556[#25556]) -* WrapperQueryBuilder should also rewrite the parsed query {pull}25480[#25480] - 
-REST:: -* Rest test fixes {pull}27354[#27354] -* Fix inconsistencies in the rest api specs for cat.snapshots {pull}26996[#26996] (issues: {issue}25737[#25737], {issue}26923[#26923]) -* Fix inconsistencies in the rest api specs for *_script {pull}26971[#26971] (issue: {issue}26923[#26923]) -* exists template needs a template name {pull}25988[#25988] -* Fix handling of invalid error trace parameter {pull}25785[#25785] (issue: {issue}25774[#25774]) -* Fix handling of exceptions thrown on HEAD requests {pull}25172[#25172] (issue: {issue}21125[#21125]) -* Fixed NPEs caused by requests without content. {pull}23497[#23497] (issue: {issue}24701[#24701]) -* Fix get mappings HEAD requests {pull}23192[#23192] (issue: {issue}21125[#21125]) - -Recovery:: -* Close translog view after primary-replica resync {pull}25862[#25862] (issue: {issue}24841[#24841]) - -Reindex API:: -* Fix update_by_query's default size parameter {pull}26784[#26784] (issue: {issue}26761[#26761]) -* Reindex: don't duplicate _source parameter {pull}24629[#24629] (issue: {issue}24628[#24628]) -* Add qa module that tests reindex-from-remote against pre-5.0 versions of Elasticsearch {pull}24561[#24561] (issues: {issue}23828[#23828], {issue}24520[#24520]) - -Scroll:: -* Fix single shard scroll within a cluster with nodes in version `>= 5.3` and `<= 5.3` {pull}24512[#24512] - -Search:: -* Fail query when a sort is provided in conjunction with rescorers {pull}26510[#26510] -* Let search phases override max concurrent requests {pull}26484[#26484] (issue: {issue}26198[#26198]) -* Avoid stack overflow on search phases {pull}27069[#27069] (issue: {issue}27042[#27042]) -* Fix search_after with geo distance sorting {pull}26891[#26891] -* Fix serialization errors when cross cluster search goes to a single shard {pull}26881[#26881] (issue: {issue}26833[#26833]) -* Early termination with index sorting should not set terminated_early in the response {pull}26597[#26597] (issue: {issue}26408[#26408]) -* Format doc values fields. {pull}22146[#22146] -* Fix term(s) query for range field {pull}25918[#25918] -* Caching a MinDocQuery can lead to wrong results. {pull}25909[#25909] -* Fix random score generation when no seed is provided. {pull}25908[#25908] -* Merge FunctionScoreQuery and FiltersFunctionScoreQuery {pull}25889[#25889] (issues: {issue}15709[#15709], {issue}23628[#23628]) -* Respect cluster alias in `_index` aggs and queries {pull}25885[#25885] (issue: {issue}25606[#25606]) -* First increment shard stats before notifying and potentially sending response {pull}25818[#25818] -* Remove assertion about deviation when casting to a float. {pull}25806[#25806] (issue: {issue}25330[#25330]) -* Prevent skipping shards if a suggest builder is present {pull}25739[#25739] (issue: {issue}25658[#25658]) -* Ensure remote cluster alias is preserved in inner hits aggs {pull}25627[#25627] (issue: {issue}25606[#25606]) -* Do not search locally if remote index pattern resolves to no indices {pull}25436[#25436] (issue: {issue}25426[#25426]) -* Adds check for negative search request size {pull}25397[#25397] (issue: {issue}22530[#22530]) -* Make sure range queries are correctly profiled. 
{pull}25108[#25108] -* Fix RangeFieldMapper rangeQuery to properly handle relations {pull}24808[#24808] (issue: {issue}24744[#24744]) -* Fix ExpandSearchPhase when response contains no hits {pull}24688[#24688] (issue: {issue}24672[#24672]) -* Refactor simple_query_string to handle text part like multi_match and query_string {pull}26145[#26145] (issue: {issue}25726[#25726]) -* Fix `_exists_` in query_string on empty indices. {pull}25993[#25993] (issue: {issue}25956[#25956]) -* Fix script field sort returning Double.MAX_VALUE for all documents {pull}24942[#24942] (issue: {issue}24940[#24940]) -* Compute the took time of the query after the expand phase of field collapsing {pull}24902[#24902] (issue: {issue}24900[#24900]) - -Sequence IDs:: -* Fire global checkpoint sync under system context {pull}26984[#26984] -* Fix pre-6.0 response to unknown replication actions {pull}25744[#25744] (issue: {issue}10708[#10708]) -* Track local checkpoint on primary immediately {pull}25434[#25434] (issues: {issue}10708[#10708], {issue}25355[#25355], {issue}25415[#25415]) -* Initialize max unsafe auto ID timestamp on shrink {pull}25356[#25356] (issues: {issue}10708[#10708], {issue}25355[#25355]) -* Use correct primary term for replicating NOOPs {pull}25128[#25128] -* Handle already closed while filling gaps {pull}25021[#25021] (issue: {issue}24925[#24925]) -* TranslogWriter.assertNoSeqNumberConflict failure [ISSUE] {pull}26710[#26710] -* Avoid losing ops in file-based recovery {pull}22945[#22945] (issue: {issue}22484[#22484]) -* Handle primary failure handling replica response {pull}24926[#24926] (issue: {issue}24935[#24935]) - -Settings:: -* Emit settings deprecation logging on empty update {pull}27017[#27017] (issue: {issue}26419[#26419]) -* Fix filtering for ListSetting {pull}26914[#26914] -* Fix settings serialization to not serialize secure settings or not take the total size into account {pull}25323[#25323] -* Keystore CLI should use the AddFileKeyStoreCommand for files {pull}25298[#25298] -* Allow resetting settings that use an IP validator {pull}24713[#24713] (issue: {issue}24709[#24709]) -* Updating an unrecognized setting should error out with that reason [ISSUE] {pull}25607[#25607] -* Settings: Fix setting groups to include secure settings {pull}25076[#25076] (issue: {issue}25069[#25069]) - -Similarities:: -* Add boolean similarity to built in similarity types {pull}26613[#26613] - -Snapshot/Restore:: -* Snapshot/Restore: better handle incorrect chunk_size settings in FS repo {pull}26844[#26844] (issue: {issue}26843[#26843]) -* Snapshot/Restore: Ensure that shard failure reasons are correctly stored in CS {pull}25941[#25941] (issue: {issue}25878[#25878]) -* Output all empty snapshot info fields if in verbose mode {pull}25455[#25455] (issue: {issue}24477[#24477]) -* Remove redundant and broken MD5 checksum from repository-s3 {pull}25270[#25270] (issue: {issue}25269[#25269]) -* Consolidates the logic for cleaning up snapshots on master election {pull}24894[#24894] (issue: {issue}24605[#24605]) -* Removes completed snapshot from cluster state on master change {pull}24605[#24605] (issue: {issue}24452[#24452]) -* Keep snapshot restore state and routing table in sync {pull}20836[#20836] (issue: {issue}19774[#19774]) -* Master failover during snapshotting could leave the snapshot incomplete [OPEN] [ISSUE] {pull}25281[#25281] -* Fix inefficient (worst case exponential) loading of snapshot repository {pull}24510[#24510] (issue: {issue}24509[#24509]) - -Stats:: -* Fix RestGetAction name typo 
{pull}27266[#27266] -* Keep cumulative elapsed scroll time in microseconds {pull}27068[#27068] (issue: {issue}27046[#27046]) -* _nodes/stats should not fail due to concurrent AlreadyClosedException {pull}25016[#25016] (issue: {issue}23099[#23099]) -* Avoid double decrement on current query counter {pull}24922[#24922] (issues: {issue}22996[#22996], {issue}24872[#24872]) -* Adjust available and free bytes to be non-negative on huge FSes {pull}24911[#24911] (issues: {issue}23093[#23093], {issue}24453[#24453]) - -Suggesters:: -* Fix division by zero in phrase suggester that causes assertion to fail {pull}27149[#27149] -* Context suggester should filter doc values field {pull}25858[#25858] (issue: {issue}25404[#25404]) -* Fix context suggester to read values from keyword type field {pull}24200[#24200] (issue: {issue}24129[#24129]) - -Templates:: -* Tests: Fix FullClusterRestartIT.testSnapshotRestore test failing in 6.x {pull}27218[#27218] (issue: {issue}27213[#27213]) - -Translog:: -* Fix Translog.Delete serialization for sequence numbers {pull}22543[#22543] - -Upgrade API:: -* Upgrade API: fix excessive logging and unnecessary template updates {pull}26698[#26698] (issue: {issue}26673[#26673]) - - - -[[regression-6.0.0]] -[float] -=== Regressions - -Bulk:: -* Only re-parse operation if a mapping update was needed {pull}23832[#23832] (issue: {issue}23665[#23665]) - -Highlighting:: -* Fix Fast Vector Highlighter NPE on match phrase prefix {pull}25116[#25116] (issue: {issue}25088[#25088]) - -Search:: -* Always use DisjunctionMaxQuery to build cross fields disjunction {pull}25115[#25115] (issue: {issue}23966[#23966]) - -Sequence IDs:: -* Indexing performance degradation in 6.0.0-beta1 [ISSUE] {pull}26339[#26339] - - - -[[upgrade-6.0.0]] -[float] -=== Upgrades - -Core:: -* Upgrade to Lucene 7.0.0 {pull}26744[#26744] -* Upgrade to lucene-7.0.0-snapshot-d94a5f0. {pull}26441[#26441] -* Upgrade to lucene-7.0.0-snapshot-a128fcb. {pull}26090[#26090] -* Upgrade to a Lucene 7 snapshot {pull}24089[#24089] (issues: {issue}23966[#23966], {issue}24086[#24086], {issue}24087[#24087], {issue}24088[#24088]) - -Logging:: -* Upgrade to Log4j 2.9.1 {pull}26750[#26750] (issues: {issue}109[#109], {issue}26464[#26464], {issue}26467[#26467]) -* Upgrade to Log4j 2.9.0 {pull}26450[#26450] (issue: {issue}23798[#23798]) - -Network:: -* Upgrade to Netty 4.1.13.Final {pull}25581[#25581] (issues: {issue}24729[#24729], {issue}6866[#6866]) -* Upgrade to Netty 4.1.11.Final {pull}24652[#24652] - -Plugin Ingest Attachment:: -* Update to Tika 1.14 {pull}21591[#21591] (issue: {issue}20390[#20390]) - -Upgrade API:: -* Improve stability and logging of TemplateUpgradeServiceIT tests {pull}25386[#25386] (issue: {issue}25382[#25382]) - - diff --git a/docs/reference/release-notes/6.0.1.asciidoc b/docs/reference/release-notes/6.0.1.asciidoc deleted file mode 100644 index 6bab0f3af530e..0000000000000 --- a/docs/reference/release-notes/6.0.1.asciidoc +++ /dev/null @@ -1,107 +0,0 @@ -[[release-notes-6.0.1]] -== 6.0.1 Release Notes - -Also see <>. 
- -[[breaking-6.0.1]] -[float] -=== Breaking changes - -Scroll:: -* Fail queries with scroll that explicitly set request_cache {pull}27342[#27342] - - - -[[enhancement-6.0.1]] -[float] -=== Enhancements - -Core:: -* Fix classes that can exit {pull}27518[#27518] - -Discovery:: -* Stop responding to ping requests before master abdication {pull}27329[#27329] (issue: {issue}27328[#27328]) - -Plugin Repository S3:: -* Remove S3 output stream {pull}27280[#27280] (issue: {issue}27278[#27278]) -* Update to AWS SDK 1.11.223 {pull}27278[#27278] - -Search:: -* fix unnecessary logger creation {pull}27349[#27349] - -Sequence IDs:: -* Log primary-replica resync failures {pull}27421[#27421] (issues: {issue}24841[#24841], {issue}27418[#27418]) - -Snapshot/Restore:: -* Remove XContentType auto detection in BlobStoreRepository {pull}27480[#27480] - - - -[[bug-6.0.1]] -[float] -=== Bug fixes - -Cluster:: -* Properly format IndexGraveyard deletion date as date {pull}27362[#27362] - -Core:: -* Do not set data paths on no local storage required {pull}27587[#27587] (issue: {issue}27572[#27572]) -* Ensure threadcontext is preserved when refresh listeners are invoked {pull}27565[#27565] -* Avoid NPE when getting build information {pull}27442[#27442] -* When building Settings do not set SecureSettings if empty {pull}26988[#26988] (issue: {issue}316[#316]) - -Engine:: -* Reset LiveVersionMap on sync commit {pull}27534[#27534] (issue: {issue}27516[#27516]) -* Carry over version map size to prevent excessive resizing {pull}27516[#27516] (issue: {issue}20498[#20498]) - -Inner Hits:: -* Return an empty _source for nested inner hit when filtering on a field that doesn't exist {pull}27531[#27531] - -Mapping:: -* Fix dynamic mapping update generation. {pull}27467[#27467] -* Fixed rounding of bounds in scaled float comparison {pull}27207[#27207] (issue: {issue}27189[#27189]) - -Nested Docs:: -* Ensure nested documents have consistent version and seq_ids {pull}27455[#27455] - -Network:: -* Throw UOE from compressible bytes stream reset {pull}27564[#27564] (issue: {issue}24927[#24927]) -* Bubble exceptions when closing compressible streams {pull}27542[#27542] (issue: {issue}27540[#27540]) - -Plugin Lang Painless:: -* Painless: Fix errors allowing void to be assigned to def. 
{pull}27460[#27460] (issue: {issue}27210[#27210]) - -Plugin Repository GCS:: -* Create new handlers for every new request in GoogleCloudStorageService {pull}27339[#27339] (issue: {issue}27092[#27092]) - -Recovery:: -* Flush old indices on primary promotion and relocation {pull}27580[#27580] (issue: {issue}27536[#27536]) - -Reindex API:: -* Reindex: Fix headers in reindex action {pull}26937[#26937] (issue: {issue}22976[#22976]) - -Search:: -* Fix profiling naming issues {pull}27133[#27133] - -Sequence IDs:: -* Fix resync request serialization {pull}27418[#27418] (issue: {issue}24841[#24841]) - -Snapshot/Restore:: -* Do not swallow exception in ChecksumBlobStoreFormat.writeAtomic() {pull}27597[#27597] -* Delete shard store files before restoring a snapshot {pull}27476[#27476] (issues: {issue}20220[#20220], {issue}26865[#26865]) -* Fix snapshot getting stuck in INIT state {pull}27214[#27214] (issue: {issue}27180[#27180]) -* Fix default value of ignore_unavailable for snapshot REST API (#25359) {pull}27056[#27056] (issue: {issue}25359[#25359]) -* Do not create directory on readonly repository (#21495) {pull}26909[#26909] (issue: {issue}21495[#21495]) - - - -[[upgrade-6.0.1]] -[float] -=== Upgrades - -Plugin Discovery EC2:: -* Upgrade AWS SDK Jackson Databind to 2.6.7.1 {pull}27361[#27361] (issues: {issue}27278[#27278], {issue}27359[#27359]) - -Plugin Discovery GCE:: -* Update Google SDK to version 1.23.0 {pull}27381[#27381] (issue: {issue}26636[#26636]) - diff --git a/docs/reference/release-notes/6.1.0.asciidoc b/docs/reference/release-notes/6.1.0.asciidoc deleted file mode 100644 index 2f3d0750e2d4f..0000000000000 --- a/docs/reference/release-notes/6.1.0.asciidoc +++ /dev/null @@ -1,407 +0,0 @@ -[[release-notes-6.1.0]] -== 6.1.0 Release Notes - -Also see <>. - -[[breaking-6.1.0]] -[float] -=== Breaking changes - -Network:: -* Allow only a fixed-size receive predictor {pull}26165[#26165] (issue: {issue}23185[#23185]) - -REST:: -* Standardize underscore requirements in parameters {pull}27414[#27414] (issues: {issue}26886[#26886], {issue}27040[#27040]) - -Scroll:: -* Fail queries with scroll that explicitly set request_cache {pull}27342[#27342] - -Search:: -* Add a limit to from + size in top_hits and inner hits. 
{pull}26492[#26492] (issue: {issue}11511[#11511]) - - - -[[breaking-java-6.1.0]] -[float] -=== Breaking Java changes - -Aggregations:: -* Moves deferring code into its own subclass {pull}26421[#26421] - -Core:: -* Unify Settings xcontent reading and writing {pull}26739[#26739] - -Settings:: -* Return List instead of an array from settings {pull}26903[#26903] -* Remove `Settings#put(Map)` {pull}26785[#26785] - - - -[[deprecation-6.1.0]] -[float] -=== Deprecations - -Aggregations:: -* Deprecate global_ordinals_hash and global_ordinals_low_cardinality {pull}26173[#26173] (issue: {issue}26014[#26014]) - -Allocation:: -* Add deprecation warning for negative index.unassigned.node_left.delayed_timeout {pull}26832[#26832] (issue: {issue}26828[#26828]) - -Analysis:: -* Add limits for ngram and shingle settings {pull}27411[#27411] (issues: {issue}25887[#25887], {issue}27211[#27211]) - -Geo:: -* [GEO] 6x Deprecate ShapeBuilders and decouple geojson parse logic {pull}27345[#27345] - -Mapping:: -* Deprecate the `index_options` parameter for numeric fields {pull}26672[#26672] (issue: {issue}21475[#21475]) - -Plugin Repository Azure:: -* Azure repository: Move to named configurations as we do for S3 repository and secure settings {pull}23405[#23405] (issues: {issue}22762[#22762], {issue}22763[#22763]) - -Search:: -* doc: deprecate _primary and _replica shard option {pull}26792[#26792] (issue: {issue}26335[#26335]) - - - -[[feature-6.1.0]] -[float] -=== New features - -Aggregations:: -* Aggregations: bucket_sort pipeline aggregation {pull}27152[#27152] (issue: {issue}14928[#14928]) -* Add composite aggregator {pull}26800[#26800] - -Analysis:: -* Added Bengali Analyzer to Elasticsearch with respect to the lucene update {pull}26527[#26527] - -Ingest:: -* add URL-Decode Processor to Ingest {pull}26045[#26045] (issue: {issue}25837[#25837]) - -Java High Level REST Client:: -* Added Delete Index support to high-level REST client {pull}27019[#27019] (issue: {issue}25847[#25847]) - -Nested Docs:: -* Multi-level Nested Sort with Filters {pull}26395[#26395] - -Query DSL:: -* Add terms_set query {pull}27145[#27145] (issue: {issue}26915[#26915]) -* Introduce sorted_after query for sorted index {pull}26377[#26377] -* Add support for auto_generate_synonyms_phrase_query in match_query, multi_match_query, query_string and simple_query_string {pull}26097[#26097] - -Search:: -* Expose `fuzzy_transpositions` parameter in fuzzy queries {pull}26870[#26870] (issue: {issue}18348[#18348]) -* Add upper limit for scroll expiry {pull}26448[#26448] (issues: {issue}11511[#11511], {issue}23268[#23268]) -* Implement adaptive replica selection {pull}26128[#26128] (issue: {issue}24915[#24915]) -* configure distance limit {pull}25731[#25731] (issue: {issue}25528[#25528]) - -Similarities:: -* Add a scripted similarity. 
{pull}25831[#25831] - -Suggesters:: -* Expose duplicate removal in the completion suggester {pull}26496[#26496] (issue: {issue}23364[#23364]) -* Support must and should for context query in context suggester {pull}26407[#26407] (issues: {issue}24421[#24421], {issue}24565[#24565]) - - - -[[enhancement-6.1.0]] -[float] -=== Enhancements - -Aggregations:: -* Allow aggregation sorting via nested aggregation {pull}26683[#26683] (issue: {issue}16838[#16838]) - -Allocation:: -* Tie-break shard path decision based on total number of shards on path {pull}27039[#27039] (issue: {issue}26654[#26654]) -* Balance shards for an index more evenly across multiple data paths {pull}26654[#26654] (issue: {issue}16763[#16763]) -* Expand "NO" decision message in NodeVersionAllocationDecider {pull}26542[#26542] (issue: {issue}10403[#10403]) -* _reroute's retry_failed flag should reset failure counter {pull}25888[#25888] (issue: {issue}25291[#25291]) - -Analysis:: -* Add configurable `max_token_length` parameter to whitespace tokenizer {pull}26749[#26749] (issue: {issue}26643[#26643]) - -CRUD:: -* Add wait_for_active_shards parameter to index open command {pull}26682[#26682] (issue: {issue}20937[#20937]) - -Core:: -* Fix classes that can exit {pull}27518[#27518] -* Replace empty index block checks with global block checks in template delete/put actions {pull}27050[#27050] (issue: {issue}10530[#10530]) -* Allow Uid#decodeId to decode from a byte array slice {pull}26987[#26987] (issue: {issue}26931[#26931]) -* Use separate searchers for "search visibility" vs "move indexing buffer to disk" {pull}26972[#26972] (issues: {issue}15768[#15768], {issue}26802[#26802], {issue}26912[#26912], {issue}3593[#3593]) -* Add ability to split shards {pull}26931[#26931] -* Make circuit breaker mutations debuggable {pull}26067[#26067] (issue: {issue}25891[#25891]) - -Dates:: -* DateProcessor Locale {pull}26186[#26186] (issue: {issue}25513[#25513]) - -Discovery:: -* Stop responding to ping requests before master abdication {pull}27329[#27329] (issue: {issue}27328[#27328]) - -Engine:: -* Ensure external refreshes will also refresh internal searcher to minimize segment creation {pull}27253[#27253] (issue: {issue}26972[#26972]) -* Move IndexShard#getWritingBytes() under InternalEngine {pull}27209[#27209] (issue: {issue}26972[#26972]) -* Refactor internal engine {pull}27082[#27082] - -Geo:: -* Add ignore_malformed to geo_shape fields {pull}24654[#24654] (issue: {issue}23747[#23747]) - -Ingest:: -* add json-processor support for non-map json types {pull}27335[#27335] (issue: {issue}25972[#25972]) -* Introduce templating support to timezone/locale in DateProcessor {pull}27089[#27089] (issue: {issue}24024[#24024]) -* Add support for parsing inline script (#23824) {pull}26846[#26846] (issue: {issue}23824[#23824]) -* Consolidate locale parsing. 
{pull}26400[#26400] -* Accept ingest simulate params as ints or strings {pull}23885[#23885] (issue: {issue}23823[#23823]) - -Internal:: -* Avoid uid creation in ParsedDocument {pull}27241[#27241] -* Upgrade to Lucene 7.1.0 snapshot version {pull}26864[#26864] (issue: {issue}26527[#26527]) -* Remove `_index` fielddata hack if cluster alias is present {pull}26082[#26082] (issue: {issue}25885[#25885]) - -Java High Level REST Client:: -* Adjust RestHighLevelClient method modifiers {pull}27238[#27238] -* Decouple BulkProcessor from ThreadPool {pull}26727[#26727] (issue: {issue}26028[#26028]) - -Logging:: -* Add more information on _failed_to_convert_ exception (#21946) {pull}27034[#27034] (issue: {issue}21946[#21946]) -* Improve shard-failed log messages. {pull}26866[#26866] - -Mapping:: -* Allow ip_range to accept CIDR notation {pull}27192[#27192] (issue: {issue}26260[#26260]) -* Deduplicate `_field_names`. {pull}26550[#26550] -* Throw a better error message for empty field names {pull}26543[#26543] (issue: {issue}23348[#23348]) -* Stricter validation for min/max values for whole numbers {pull}26137[#26137] -* Make FieldMapper.copyTo() always non-null. {pull}25994[#25994] - -Nested Docs:: -* Use the primary_term field to identify parent documents {pull}27469[#27469] (issue: {issue}24362[#24362]) -* Prohibit using `nested_filter`, `nested_path` and new `nested` Option at the same time in FieldSortBuilder {pull}26490[#26490] (issue: {issue}17286[#17286]) - -Network:: -* Remove manual tracking of registered channels {pull}27445[#27445] (issue: {issue}27260[#27260]) -* Remove tcp profile from low level nio channel {pull}27441[#27441] (issue: {issue}27260[#27260]) -* Decouple `ChannelFactory` from Tcp classes {pull}27286[#27286] (issue: {issue}27260[#27260]) - -Percolator:: -* Use Lucene's CoveringQuery to select percolate candidate matches {pull}27271[#27271] (issues: {issue}26081[#26081], {issue}26307[#26307]) -* Add support to percolate query to percolate multiple documents simultaneously {pull}26418[#26418] -* Hint what clauses are important in a conjunction query based on fields {pull}26081[#26081] -* Add support for selecting percolator query candidate matches containing range queries {pull}25647[#25647] (issue: {issue}21040[#21040]) - -Plugin Discovery EC2:: -* update AWS SDK for ECS Task IAM support in discovery-ec2 {pull}26479[#26479] (issue: {issue}23039[#23039]) - -Plugin Lang Painless:: -* Painless: Only allow Painless type names to be the same as the equivalent Java class. 
{pull}27264[#27264] -* Allow for the Painless Definition to have multiple instances for white-listing {pull}27096[#27096] -* Separate Painless Whitelist Loading from the Painless Definition {pull}26540[#26540] -* Remove Sort enum from Painless Definition {pull}26179[#26179] - -Plugin Repository Azure:: -* Add azure storage endpoint suffix #26432 {pull}26568[#26568] (issue: {issue}26432[#26432]) -* Support for accessing Azure repositories through a proxy {pull}23518[#23518] (issues: {issue}23506[#23506], {issue}23517[#23517]) - -Plugin Repository S3:: -* Remove S3 output stream {pull}27280[#27280] (issue: {issue}27278[#27278]) -* Update to AWS SDK 1.11.223 {pull}27278[#27278] - -Plugins:: -* Plugins: Add versionless alias to all security policy codebase properties {pull}26756[#26756] (issue: {issue}26521[#26521]) -* Allow plugins to plug rescore implementations {pull}26368[#26368] (issue: {issue}26208[#26208]) - -Query DSL:: -* Add support for wildcard on `_index` {pull}27334[#27334] (issue: {issue}25722[#25722]) - -Reindex API:: -* Update by Query is modified to accept short `script` parameter. {pull}26841[#26841] (issue: {issue}24898[#24898]) -* reindex: automatically choose the number of slices {pull}26030[#26030] (issues: {issue}24547[#24547], {issue}25582[#25582]) - -Rollover:: -* Add size-based condition to the index rollover API {pull}27160[#27160] (issue: {issue}27004[#27004]) -* Add size-based condition to the index rollover API {pull}27115[#27115] (issue: {issue}27004[#27004]) - -Scripting:: -* Script: Convert script query to a dedicated script context {pull}26003[#26003] - -Search:: -* Make fields optional in multi_match query and rely on index.query.default_field by default {pull}27380[#27380] -* fix unnecessary logger creation {pull}27349[#27349] -* `ObjectParser` : replace `IllegalStateException` with `ParsingException` {pull}27302[#27302] (issue: {issue}27147[#27147]) -* Uses norms for exists query if enabled {pull}27237[#27237] -* Cross Cluster Search: make remote clusters optional {pull}27182[#27182] (issues: {issue}26118[#26118], {issue}27161[#27161]) -* Enhances exists queries to reduce need for `_field_names` {pull}26930[#26930] (issue: {issue}26770[#26770]) -* Change ParentFieldSubFetchPhase to create doc values iterator once per segment {pull}26815[#26815] -* Change VersionFetchSubPhase to create doc values iterator once per segment {pull}26809[#26809] -* Change ScriptFieldsFetchSubPhase to create search scripts once per segment {pull}26808[#26808] (issue: {issue}26775[#26775]) -* Make sure SortBuilders rewrite inner nested sorts {pull}26532[#26532] -* Extend testing of build method in ScriptSortBuilder {pull}26520[#26520] (issues: {issue}17286[#17286], {issue}26490[#26490]) -* Accept an array of field names and boosts in the index.query.default_field setting {pull}26320[#26320] (issue: {issue}25946[#25946]) -* Reject IPv6-mapped IPv4 addresses when using the CIDR notation. 
{pull}26254[#26254] (issue: {issue}26078[#26078]) -* Rewrite range queries with open bounds to exists query {pull}26160[#26160] (issue: {issue}22640[#22640]) - -Sequence IDs:: -* Only fsync global checkpoint if needed {pull}27652[#27652] -* Log primary-replica resync failures {pull}27421[#27421] (issues: {issue}24841[#24841], {issue}27418[#27418]) -* Lazy initialize checkpoint tracker bit sets {pull}27179[#27179] (issue: {issue}10708[#10708]) -* Returns the current primary_term for Get/MultiGet requests {pull}27177[#27177] (issue: {issue}26493[#26493]) - -Settings:: -* Allow affix settings to specify dependencies {pull}27161[#27161] -* Represent lists as actual lists inside Settings {pull}26878[#26878] (issue: {issue}26723[#26723]) -* Remove Settings#getAsMap() {pull}26845[#26845] -* Replace group map settings with affix setting {pull}26819[#26819] -* Throw exception if setting isn't recognized {pull}26569[#26569] (issue: {issue}25607[#25607]) -* Settings: Move keystore creation to plugin installation {pull}26329[#26329] (issue: {issue}26309[#26309]) - -Snapshot/Restore:: -* Remove XContentType auto detection in BlobStoreRepository {pull}27480[#27480] -* Snapshot: Migrate TransportRequestHandler to TransportMasterNodeAction {pull}27165[#27165] (issue: {issue}27151[#27151]) -* Fix toString of class SnapshotStatus (#26851) {pull}26852[#26852] (issue: {issue}26851[#26851]) - -Stats:: -* Adds average document size to DocsStats {pull}27117[#27117] (issue: {issue}27004[#27004]) -* Stats to record how often the ClusterState diff mechanism is used successfully {pull}27107[#27107] (issue: {issue}26973[#26973]) -* Expose adaptive replica selection stats in /_nodes/stats API {pull}27090[#27090] -* Add cgroup memory usage/limit to OS stats on Linux {pull}26166[#26166] -* Add segment attributes to the `_segments` API. {pull}26157[#26157] (issue: {issue}26130[#26130]) - -Suggesters:: -* Improve error message for parse failures of completion fields {pull}27297[#27297] -* Support 'AND' operation for context query in context suggester {pull}24565[#24565] (issue: {issue}24421[#24421]) - - - -[[bug-6.1.0]] -[float] -=== Bug fixes - -Aggregations:: -* Disable the "low cardinality" optimization of terms aggregations. {pull}27545[#27545] (issue: {issue}27543[#27543]) -* scripted_metric _agg parameter disappears if params are provided {pull}27159[#27159] (issues: {issue}19768[#19768], {issue}19863[#19863]) - -Cluster:: -* Properly format IndexGraveyard deletion date as date {pull}27362[#27362] -* Remove optimisations to reuse objects when applying a new `ClusterState` {pull}27317[#27317] - -Core:: -* Do not set data paths on no local storage required {pull}27587[#27587] (issue: {issue}27572[#27572]) -* Ensure threadcontext is preserved when refresh listeners are invoked {pull}27565[#27565] -* Ensure logging is configured for CLI commands {pull}27523[#27523] (issue: {issue}27521[#27521]) -* Protect shard splitting from illegal target shards {pull}27468[#27468] (issue: {issue}26931[#26931]) -* Avoid NPE when getting build information {pull}27442[#27442] -* Fix `ShardSplittingQuery` to respect nested documents. 
{pull}27398[#27398] (issue: {issue}27378[#27378]) -* When building Settings do not set SecureSettings if empty {pull}26988[#26988] (issue: {issue}316[#316]) - -Engine:: -* Reset LiveVersionMap on sync commit {pull}27534[#27534] (issue: {issue}27516[#27516]) -* Carry over version map size to prevent excessive resizing {pull}27516[#27516] (issue: {issue}20498[#20498]) - -Geo:: -* Correct two equality checks on incomparable types {pull}27688[#27688] -* [GEO] fix pointsOnly bug for MULTIPOINT {pull}27415[#27415] - -Index Templates:: -* Prevent constructing an index template without index patterns {pull}27662[#27662] - -Ingest:: -* Add pipeline support for REST API bulk upsert {pull}27075[#27075] (issue: {issue}25601[#25601]) -* Fixing Grok pattern for Apache 2.4 {pull}26635[#26635] - -Inner Hits:: -* Return an empty _source for nested inner hit when filtering on a field that doesn't exist {pull}27531[#27531] - -Internal:: -* When checking if key exists in ThreadContextStruct:putHeaders() method, should put requestHeaders in map first {pull}26068[#26068] -* Adding a refresh listener to a recovering shard should be a noop {pull}26055[#26055] - -Java High Level REST Client:: -* Register ip_range aggregation with the high level client {pull}26383[#26383] -* add top hits as a parsed aggregation to the rest high level client {pull}26370[#26370] - -Mapping:: -* Fix dynamic mapping update generation. {pull}27467[#27467] -* Fix merging of _meta field {pull}27352[#27352] (issue: {issue}27323[#27323]) -* Fixed rounding of bounds in scaled float comparison {pull}27207[#27207] (issue: {issue}27189[#27189]) - -Nested Docs:: -* Ensure nested documents have consistent version and seq_ids {pull}27455[#27455] -* Prevent duplicate fields when mixing parent and root nested includes {pull}27072[#27072] (issue: {issue}26990[#26990]) - -Network:: -* Throw UOE from compressible bytes stream reset {pull}27564[#27564] (issue: {issue}24927[#24927]) -* Bubble exceptions when closing compressible streams {pull}27542[#27542] (issue: {issue}27540[#27540]) -* Do not set SO_LINGER on server channels {pull}26997[#26997] -* Do not set SO_LINGER to 0 when not shutting down {pull}26871[#26871] (issue: {issue}26764[#26764]) -* Close TcpTransport on RST in some Spots to Prevent Leaking TIME_WAIT Sockets {pull}26764[#26764] (issue: {issue}26701[#26701]) - -Packaging:: -* Removes minimum master nodes default number {pull}26803[#26803] -* setgid on /etc/elasticsearch on package install {pull}26412[#26412] (issue: {issue}26410[#26410]) - -Percolator:: -* Avoid TooManyClauses exception if number of terms / ranges is exactly equal to 1024 {pull}27519[#27519] (issue: {issue}1[#1]) - -Plugin Analysis ICU:: -* Catch InvalidPathException in IcuCollationTokenFilterFactory {pull}27202[#27202] - -Plugin Lang Painless:: -* Painless: Fix variable scoping issue in lambdas {pull}27571[#27571] (issue: {issue}26760[#26760]) -* Painless: Fix errors allowing void to be assigned to def.
{pull}27460[#27460] (issue: {issue}27210[#27210]) - -Plugin Repository GCS:: -* Create new handlers for every new request in GoogleCloudStorageService {pull}27339[#27339] (issue: {issue}27092[#27092]) - -Recovery:: -* Flush old indices on primary promotion and relocation {pull}27580[#27580] (issue: {issue}27536[#27536]) - -Reindex API:: -* Reindex: Fix headers in reindex action {pull}26937[#26937] (issue: {issue}22976[#22976]) - -Scroll:: -* Fix scroll query with a sort that is a prefix of the index sort {pull}27498[#27498] - -Search:: -* Fix profiling naming issues {pull}27133[#27133] -* Fix max score tracking with field collapsing {pull}27122[#27122] (issue: {issue}23840[#23840]) -* Apply missing request options to the expand phase {pull}27118[#27118] (issues: {issue}26649[#26649], {issue}27079[#27079]) -* Calculate and cache result when advanceExact is called {pull}26920[#26920] (issue: {issue}26817[#26817]) -* Filter unsupported relation for RangeQueryBuilder {pull}26620[#26620] (issue: {issue}26575[#26575]) -* Handle leniency for phrase query on a field indexed without positions {pull}26388[#26388] - -Sequence IDs:: -* Obey translog durability in global checkpoint sync {pull}27641[#27641] -* Fix resync request serialization {pull}27418[#27418] (issue: {issue}24841[#24841]) - -Settings:: -* Allow index settings to be reset by wildcards {pull}27671[#27671] (issue: {issue}27537[#27537]) - -Snapshot/Restore:: -* Do not swallow exception in ChecksumBlobStoreFormat.writeAtomic() {pull}27597[#27597] -* Delete shard store files before restoring a snapshot {pull}27476[#27476] (issues: {issue}20220[#20220], {issue}26865[#26865]) -* Fix snapshot getting stuck in INIT state {pull}27214[#27214] (issue: {issue}27180[#27180]) -* Fix default value of ignore_unavailable for snapshot REST API (#25359) {pull}27056[#27056] (issue: {issue}25359[#25359]) -* Do not create directory on readonly repository (#21495) {pull}26909[#26909] (issue: {issue}21495[#21495]) - -Stats:: -* Include internal refreshes in refresh stats {pull}27615[#27615] -* Make Segment statistics aware of segments hold by internal readers {pull}27558[#27558] -* Ensure `doc_stats` are changing even if refresh is disabled {pull}27505[#27505] - - - -[[upgrade-6.1.0]] -[float] -=== Upgrades - -Core:: -* Upgrade to Jackson 2.8.10 {pull}27230[#27230] -* Upgrade to Lucene 7.1 {pull}27225[#27225] - -Plugin Discovery EC2:: -* Upgrade AWS SDK Jackson Databind to 2.6.7.1 {pull}27361[#27361] (issues: {issue}27278[#27278], {issue}27359[#27359]) - -Plugin Discovery GCE:: -* Update Google SDK to version 1.23.0 {pull}27381[#27381] (issue: {issue}26636[#26636]) - -Plugin Lang Painless:: -* Upgrade Painless from ANTLR 4.5.1-1 to ANTLR 4.5.3. {pull}27153[#27153] - diff --git a/docs/reference/release-notes/6.1.1.asciidoc b/docs/reference/release-notes/6.1.1.asciidoc deleted file mode 100644 index 03e283e03fc55..0000000000000 --- a/docs/reference/release-notes/6.1.1.asciidoc +++ /dev/null @@ -1,40 +0,0 @@ -[[release-notes-6.1.1]] -== 6.1.1 Release Notes - -Also see <>. 
- -[[enhancement-6.1.1]] -[float] -=== Enhancements - -Snapshot/Restore:: -* Use AmazonS3.doesObjectExist() method in S3BlobContainer {pull}27723[#27723] - - - -[[bug-6.1.1]] -[float] -=== Bug fixes - -Inner Hits:: -* Add version support for inner hits in field collapsing (#27822) {pull}27833[#27833] (issue: {issue}27822[#27822]) - -Java REST Client:: -* Do not use system properties when building the HttpAsyncClient {pull}27829[#27829] (issue: {issue}27827[#27827]) - -Search:: -* Fix routing with leading or trailing whitespace {pull}27712[#27712] (issue: {issue}27708[#27708]) - -Sequence IDs:: -* Recovery from snapshot may leave seq# gaps {pull}27850[#27850] -* No longer unidle shard during recovery {pull}27757[#27757] (issue: {issue}26591[#26591]) - - - -[[upgrade-6.1.1]] -[float] -=== Upgrades - -Ingest:: -* update ingest-attachment to use Tika 1.17 and newer deps {pull}27824[#27824] - diff --git a/docs/reference/release-notes/6.1.2.asciidoc b/docs/reference/release-notes/6.1.2.asciidoc deleted file mode 100644 index 5bc0da22bfc6a..0000000000000 --- a/docs/reference/release-notes/6.1.2.asciidoc +++ /dev/null @@ -1,40 +0,0 @@ -[[release-notes-6.1.2]] -== 6.1.2 Release Notes - -Also see <>. - -[[enhancement-6.1.2]] -[float] -=== Enhancements - -Internal:: -* Make AbstractQueryBuilder.declareStandardFields to be protected (#27865) {pull}27894[#27894] (issue: {issue}27865[#27865]) - -[[bug-6.1.2]] -[float] -=== Bug fixes - -Aggregations:: -* Fix incorrect results for aggregations nested under a nested aggregation {pull}27946[#27946] (issue: {issue}27912[#27912]) -* Fix composite aggregation when after term is missing in the shard {pull}27936[#27936] -* Fix preserving FiltersAggregationBuilder#keyed field on rewrite {pull}27900[#27900] (issue: {issue}27841[#27841]) - -Engine:: -* Use `_refresh` to shrink the version map on inactivity {pull}27918[#27918] (issue: {issue}27852[#27852]) -* Allow resize version map under lock even if there are pending operations {pull}27870[#27870] (issue: {issue}27852[#27852]) - -Network:: -* Only bind loopback addresses when binding to local {pull}28029[#28029] - -Recovery:: -* Allow shrinking of indices from a previous major {pull}28076[#28076] (issue: {issue}28061[#28061]) - -Search:: -* Use the underlying connection version for CCS connections {pull}28093[#28093] -* Carry forward weights, etc on rescore rewrite {pull}27981[#27981] (issue: {issue}27979[#27979]) - -Snapshot/Restore:: -* Fail restore when the shard allocations max retries count is reached {pull}27493[#27493] (issue: {issue}26865[#26865]) - -Translog:: -* Only sync translog when global checkpoint increased {pull}27973[#27973] (issues: {issue}27837[#27837], {issue}27970[#27970]) diff --git a/docs/reference/release-notes/6.1.3.asciidoc b/docs/reference/release-notes/6.1.3.asciidoc deleted file mode 100644 index 18af582f99e47..0000000000000 --- a/docs/reference/release-notes/6.1.3.asciidoc +++ /dev/null @@ -1,33 +0,0 @@ -[[release-notes-6.1.3]] -== 6.1.3 Release Notes - -[[bug-6.1.3]] -[float] -=== Bug fixes - -Engine:: -* Replica recovery could go into an endless flushing loop {pull}28350[#28350] - -Internal:: -* Never return null from Strings.tokenizeToStringArray {pull}28224[#28224] (issue: {issue}28213[#28213]) -* Fallback to TransportMasterNodeAction for cluster health retries {pull}28195[#28195] (issue: {issue}28169[#28169]) - -Mapping:: -* Allow update of `eager_global_ordinals` on `_parent`. 
{pull}28014[#28014] (issue: {issue}24407[#24407]) - -Scripting:: -* Ensure we protect Collections obtained from scripts from self-referencing {pull}28335[#28335] - -Settings:: -* Fix setting notification for complex setting (affixMap settings) that could cause transient settings to be ignored {pull}28317[#28317] (issue: {issue}28316[#28316]) -* Fix environment variable substitutions in list setting {pull}28106[#28106] (issue: {issue}27926[#27926]) - -Snapshot/Restore:: -* Avoid concurrent snapshot finalizations when deleting an INIT snapshot {pull}28078[#28078] (issues: {issue}27214[#27214], {issue}27931[#27931], {issue}27974[#27974]) -* Do not start snapshots that are deleted during initialization {pull}27931[#27931] - - - - - - diff --git a/docs/reference/release-notes/6.1.4.asciidoc b/docs/reference/release-notes/6.1.4.asciidoc deleted file mode 100644 index 93e9c38448682..0000000000000 --- a/docs/reference/release-notes/6.1.4.asciidoc +++ /dev/null @@ -1,31 +0,0 @@ -[[release-notes-6.1.4]] -== 6.1.4 Release Notes - -Also see <>. - -[[enhancement-6.1.4]] -[float] -=== Enhancements - -Core:: -* Fix classes that can exit {pull}27518[#27518] - -[[bug-6.1.4]] -[float] -=== Bug fixes - -Aggregations:: -* StringTerms.Bucket.getKeyAsNumber detection type {pull}28118[#28118] (issue: {issue}28012[#28012]) - -Core:: -* Remove special handling for _all in nodes info {pull}28971[#28971] (issue: {issue}28797[#28797]) - -Engine:: -* Avoid class cast exception from index writer {pull}28989[#28989] -* Maybe die before failing engine {pull}28973[#28973] (issues: {issue}27265[#27265], {issue}28967[#28967]) - -Scripting:: -* Painless: Fix For Loop NullPointerException {pull}28506[#28506] (issue: {issue}28501[#28501]) - - - diff --git a/docs/reference/release-notes/6.2.0.asciidoc b/docs/reference/release-notes/6.2.0.asciidoc deleted file mode 100644 index f48294855ced0..0000000000000 --- a/docs/reference/release-notes/6.2.0.asciidoc +++ /dev/null @@ -1,364 +0,0 @@ -[[release-notes-6.2.0]] -== 6.2.0 Release Notes - -Also see <>. - -[[breaking-6.2.0]] -[float] -=== Breaking changes - -Aggregations:: -* Add a new cluster setting to limit the total number of buckets returned by a request {pull}27581[#27581] (issues: {issue}26012[#26012], {issue}27452[#27452]) - -Core:: -* Forbid granting the all permission in production {pull}27548[#27548] - -Highlighting:: -* Limit the analyzed text for highlighting {pull}27934[#27934] (issue: {issue}27517[#27517]) - -Rollover:: -* Fail rollover if duplicated alias found in templates {pull}28110[#28110] (issue: {issue}26976[#26976]) - -Search:: -* Introduce limit to the number of terms in Terms Query {pull}27968[#27968] (issue: {issue}18829[#18829]) - - - -[[breaking-java-6.2.0]] -[float] -=== Breaking Java changes - -Java API:: -* Remove `operationThreaded` from Java API {pull}27836[#27836] - -Java High Level REST Client:: -* REST high-level client: remove index suffix from indices client method names {pull}28263[#28263] - - - -[[deprecation-6.2.0]] -[float] -=== Deprecations - -Analysis:: -* Backport delimited payload filter renaming {pull}27535[#27535] (issue: {issue}26625[#26625]) - -Suggesters:: -* deprecating `jarowinkler` in favor of `jaro_winkler` {pull}27526[#27526] -* Deprecating `levenstein` in favor of `levenshtein` {pull}27409[#27409] (issue: {issue}27325[#27325]) - - - -[[feature-6.2.0]] -[float] -=== New features - -Plugin Ingest GeoIp:: -* Enable ASN support for Ingest GeoIP plugin.
{pull}27958[#27958] (issue: {issue}27849[#27849]) - -Plugin Lang Painless:: -* Painless: Add spi jar that will be published for extending whitelists {pull}28302[#28302] -* Painless: Add a simple cache for whitelist methods and fields. {pull}28142[#28142] - -Plugins:: -* Add the ability to bundle multiple plugins into a meta plugin {pull}28022[#28022] (issue: {issue}27316[#27316]) - -Rank Evaluation:: -* Backport of ranking evaluation API (#27478) {pull}27844[#27844] (issue: {issue}27478[#27478]) - -Recovery:: -* Backport for using lastSyncedGlobalCheckpoint in deletion policy {pull}27866[#27866] (issue: {issue}27826[#27826]) - -Reindex API:: -* Add scroll parameter to _reindex API {pull}28041[#28041] (issue: {issue}27555[#27555]) - - - -[[enhancement-6.2.0]] -[float] -=== Enhancements - -Allocation:: -* Fix cluster.routing.allocation.enable and cluster.routing.rebalance.enable case {pull}28037[#28037] (issue: {issue}28007[#28007]) -* Add node id to shard failure message {pull}28024[#28024] (issue: {issue}28018[#28018]) - -Analysis:: -* Limit the analyzed text for highlighting (#27934) {pull}28176[#28176] (issue: {issue}27517[#27517]) -* Allow TrimFilter to be used in custom normalizers {pull}27758[#27758] (issue: {issue}27310[#27310]) - -Circuit Breakers:: -* Add accounting circuit breaker and track segment memory usage {pull}27116[#27116] (issue: {issue}27044[#27044]) - -Cluster:: -* Adds wait_for_no_initializing_shards to cluster health API {pull}27489[#27489] (issue: {issue}25623[#25623]) - -Core:: -* Introduce elasticsearch-core jar {pull}28191[#28191] (issue: {issue}27933[#27933]) -* Rename core module to server {pull}28190[#28190] (issue: {issue}27933[#27933]) -* Rename core module to server {pull}28180[#28180] (issue: {issue}27933[#27933]) -* Introduce elasticsearch-core jar {pull}28178[#28178] (issue: {issue}27933[#27933]) -* Add Writeable.Reader support to TransportResponseHandler {pull}28010[#28010] (issue: {issue}26315[#26315]) -* Simplify rejected execution exception {pull}27664[#27664] (issue: {issue}27663[#27663]) -* Add node name to thread pool executor name {pull}27663[#27663] (issues: {issue}26007[#26007], {issue}26835[#26835]) - -Discovery:: -* Add information when master node left to DiscoveryNodes' shortSummary() {pull}28197[#28197] (issue: {issue}28169[#28169]) - -Engine:: -* Move uid lock into LiveVersionMap {pull}27905[#27905] -* Optimize version map for append-only indexing {pull}27752[#27752] - -Geo:: -* [GEO] Add WKT Support to GeoBoundingBoxQueryBuilder {pull}27692[#27692] (issues: {issue}27690[#27690], {issue}9120[#9120]) -* [Geo] Add Well Known Text (WKT) Parsing Support to ShapeBuilders {pull}27417[#27417] (issue: {issue}9120[#9120]) - -Highlighting:: -* Include all sentences smaller than fragment_size in the unified highlighter {pull}28132[#28132] (issue: {issue}28089[#28089]) - -Ingest:: -* Enable convert processor to support Long and Double {pull}27891[#27891] (issues: {issue}23085[#23085], {issue}23423[#23423]) - -Internal:: -* Make KeyedLock reentrant {pull}27920[#27920] -* Make AbstractQueryBuilder.declareStandardFields to be protected (#27865) {pull}27894[#27894] (issue: {issue}27865[#27865]) -* Tighten the CountedBitSet class {pull}27632[#27632] -* Avoid doing redundant work when checking for self references. 
{pull}26927[#26927] (issue: {issue}26907[#26907]) - -Java API:: -* Add missing delegate methods to NodeIndicesStats {pull}28092[#28092] -* Java api clean-up : consistency for `shards_acknowledged` getters {pull}27819[#27819] (issue: {issue}27784[#27784]) - -Java High Level REST Client:: -* add toString implementation for UpdateRequest. {pull}27997[#27997] (issue: {issue}27986[#27986]) -* Add Close Index API to the high level REST client {pull}27734[#27734] (issue: {issue}27205[#27205]) -* Add Open Index API to the high level REST client {pull}27574[#27574] (issue: {issue}27205[#27205]) -* Added Create Index support to high-level REST client {pull}27351[#27351] (issue: {issue}27205[#27205]) -* Add multi get api to the high level rest client {pull}27337[#27337] (issue: {issue}27205[#27205]) -* Add msearch api to high level client {pull}27274[#27274] - -Mapping:: -* Allow `_doc` as a type. {pull}27816[#27816] (issues: {issue}27750[#27750], {issue}27751[#27751]) - -Network:: -* Add NioGroup for use in different transports {pull}27737[#27737] (issue: {issue}27260[#27260]) -* Add read timeouts to http module {pull}27713[#27713] -* Implement byte array reusage in `NioTransport` {pull}27696[#27696] (issue: {issue}27563[#27563]) -* Introduce resizable inbound byte buffer {pull}27551[#27551] (issue: {issue}27563[#27563]) -* Decouple nio constructs from the tcp transport {pull}27484[#27484] (issue: {issue}27260[#27260]) - -Packaging:: -* Extend JVM options to support multiple versions {pull}27675[#27675] (issue: {issue}27646[#27646]) -* Add explicit coreutils dependency {pull}27660[#27660] (issue: {issue}27609[#27609]) -* Detect mktemp from coreutils {pull}27659[#27659] (issues: {issue}27609[#27609], {issue}27643[#27643]) -* Enable GC logs by default {pull}27610[#27610] -* Use private directory for temporary files {pull}27609[#27609] (issues: {issue}14372[#14372], {issue}27144[#27144]) - -Percolator:: -* also extract match_all queries when indexing percolator queries {pull}27585[#27585] - -Plugin Lang Painless:: -* Painless: Add whitelist extensions {pull}28161[#28161] -* Painless: Modify Loader to Load Classes Directly from Definition {pull}28088[#28088] -* Clean Up Painless Cast Object {pull}27794[#27794] -* Painless: Only allow Painless type names to be the same as the equivalent Java class. 
{pull}27264[#27264] - -Plugins:: -* Add client actions to action plugin {pull}28280[#28280] (issue: {issue}27759[#27759]) -* Plugins: Add validation to plugin descriptor parsing {pull}27951[#27951] -* Plugins: Add plugin extension capabilities {pull}27881[#27881] -* Add support for filtering mappings fields {pull}27603[#27603] - -Rank Evaluation:: -* Simplify RankEvalResponse output {pull}28266[#28266] - -Recovery:: -* Truncate tlog cli should assign global checkpoint {pull}28192[#28192] (issue: {issue}28181[#28181]) -* Replica starts peer recovery with safe commit {pull}28181[#28181] (issue: {issue}10708[#10708]) -* Primary send safe commit in file-based recovery {pull}28038[#28038] (issue: {issue}10708[#10708]) -* Fail resync-failed shards in subsequent writes {pull}28005[#28005] -* Introduce promoting index shard state {pull}28004[#28004] (issue: {issue}24841[#24841]) -* Non-peer recovery should set the global checkpoint {pull}27965[#27965] -* Persist global checkpoint when finalizing a peer recovery {pull}27947[#27947] (issue: {issue}27861[#27861]) -* Rollback a primary before recovering from translog {pull}27804[#27804] (issue: {issue}10708[#10708]) - -Search:: -* Use typeName() to check field type in GeoShapeQueryBuilder {pull}27730[#27730] -* Optimize search_after when sorting in index sort order {pull}26401[#26401] - -Sequence IDs:: -* Do not keep 5.x commits when having 6.x commits {pull}28188[#28188] (issues: {issue}27606[#27606], {issue}28038[#28038]) -* Use lastSyncedGlobalCheckpoint in deletion policy {pull}27826[#27826] (issue: {issue}27606[#27606]) -* Use CountedBitSet in LocalCheckpointTracker {pull}27793[#27793] -* Only fsync global checkpoint if needed {pull}27652[#27652] -* Keep commits and translog up to the global checkpoint {pull}27606[#27606] -* Adjust CombinedDeletionPolicy for multiple commits {pull}27456[#27456] (issues: {issue}10708[#10708], {issue}27367[#27367]) -* Keeps index commits up to the current global checkpoint {pull}27367[#27367] (issue: {issue}10708[#10708]) -* Dedup translog operations by reading in reverse {pull}27268[#27268] (issue: {issue}10708[#10708]) - -Settings:: -* Add validation of keystore setting names {pull}27626[#27626] - -Snapshot/Restore:: -* Use AmazonS3.doesObjectExist() method in S3BlobContainer {pull}27723[#27723] -* Remove XContentType auto detection in BlobStoreRepository {pull}27480[#27480] -* Include include_global_state in Snapshot status API (#22423) {pull}26853[#26853] (issue: {issue}22423[#22423]) - -Task Manager:: -* Add ability to associate an ID with tasks {pull}27764[#27764] (issue: {issue}23250[#23250]) - -Translog:: -* Simplify MultiSnapshot#SeqNoset {pull}27547[#27547] (issue: {issue}27268[#27268]) -* Enclose CombinedDeletionPolicy in SnapshotDeletionPolicy {pull}27528[#27528] (issues: {issue}27367[#27367], {issue}27456[#27456]) - - - -[[bug-6.2.0]] -[float] -=== Bug fixes - -Aggregations:: -* Adds metadata to rewritten aggregations {pull}28185[#28185] (issue: {issue}28170[#28170]) -* Fix NPE on composite aggregation with sub-aggregations that need scores {pull}28129[#28129] -* StringTerms.Bucket.getKeyAsNumber detection type {pull}28118[#28118] (issue: {issue}28012[#28012]) -* Fix incorrect results for aggregations nested under a nested aggregation {pull}27946[#27946] (issue: {issue}27912[#27912]) -* Fix global aggregation that requires breadth first and scores {pull}27942[#27942] (issues: {issue}22321[#22321], {issue}27928[#27928]) -* Fix composite aggregation when after term is missing in the shard 
{pull}27936[#27936] -* Fix preserving FiltersAggregationBuilder#keyed field on rewrite {pull}27900[#27900] (issue: {issue}27841[#27841]) -* Using DocValueFormat::parseBytesRef for parsing missing value parameter {pull}27855[#27855] (issue: {issue}27788[#27788]) -* Fix illegal cast of the "low cardinality" optimization of the `terms` aggregation. {pull}27543[#27543] -* Always include the _index and _id for nested search hits. {pull}27201[#27201] (issue: {issue}27053[#27053]) - -Allocation:: -* Do not open indices with broken settings {pull}26995[#26995] - -Core:: -* Fix lock accounting in releasable lock {pull}28202[#28202] -* Fixes ByteSizeValue to serialise correctly {pull}27702[#27702] (issue: {issue}27568[#27568]) -* Do not set data paths on no local storage required {pull}27587[#27587] (issue: {issue}27572[#27572]) -* Ensure threadcontext is preserved when refresh listeners are invoked {pull}27565[#27565] -* Ensure logging is configured for CLI commands {pull}27523[#27523] (issue: {issue}27521[#27521]) - -Engine:: -* Replica recovery could go into an endless flushing loop {pull}28350[#28350] -* Use `_refresh` to shrink the version map on inactivity {pull}27918[#27918] (issue: {issue}27852[#27852]) -* Allow resize version map under lock even if there are pending operations {pull}27870[#27870] (issue: {issue}27852[#27852]) -* Reset LiveVersionMap on sync commit {pull}27534[#27534] (issue: {issue}27516[#27516]) - -Geo:: -* Correct two equality checks on incomparable types {pull}27688[#27688] -* Handle case where the hole vertex is south of the containing polygon(s) {pull}27685[#27685] (issue: {issue}25933[#25933]) - -Highlighting:: -* Fix highlighting on a keyword field that defines a normalizer {pull}27604[#27604] - -Inner Hits:: -* Add version support for inner hits in field collapsing (#27822) {pull}27833[#27833] (issue: {issue}27822[#27822]) - -Internal:: -* Never return null from Strings.tokenizeToStringArray {pull}28224[#28224] (issue: {issue}28213[#28213]) -* Fallback to TransportMasterNodeAction for cluster health retries {pull}28195[#28195] (issue: {issue}28169[#28169]) -* Retain originalIndex info when rewriting FieldCapabilities requests {pull}27761[#27761] - -Java REST Client:: -* Do not use system properties when building the HttpAsyncClient {pull}27829[#27829] (issue: {issue}27827[#27827]) - -Mapping:: -* Ignore null value for range field (#27845) {pull}28116[#28116] (issue: {issue}27845[#27845]) -* Pass `java.locale.providers=COMPAT` to Java 9 onwards {pull}28080[#28080] (issue: {issue}10984[#10984]) -* Allow update of `eager_global_ordinals` on `_parent`. 
{pull}28014[#28014] (issue: {issue}24407[#24407]) -* Fix merging of _meta field {pull}27352[#27352] (issue: {issue}27323[#27323]) - -Network:: -* Only bind loopback addresses when binding to local {pull}28029[#28029] (issue: {issue}1877[#1877]) -* Remove potential nio selector leak {pull}27825[#27825] -* Fix issue where the incorrect buffers are written {pull}27695[#27695] (issue: {issue}27551[#27551]) -* Throw UOE from compressible bytes stream reset {pull}27564[#27564] (issue: {issue}24927[#24927]) -* Bubble exceptions when closing compressible streams {pull}27542[#27542] (issue: {issue}27540[#27540]) - -Packaging:: -* Allow custom service names when installing on windows {pull}25255[#25255] (issue: {issue}25231[#25231]) - -Percolator:: -* Avoid TooManyClauses exception if number of terms / ranges is exactly equal to 1024 {pull}27519[#27519] (issue: {issue}1[#1]) - -Plugin Analysis ICU:: -* Catch InvalidPathException in IcuCollationTokenFilterFactory {pull}27202[#27202] - -Plugin Analysis Phonetic:: -* Fix daitch_mokotoff phonetic filter to use the dedicated Lucene filter {pull}28225[#28225] (issue: {issue}28211[#28211]) - -Plugin Lang Painless:: -* Painless: Fix variable scoping issue in lambdas {pull}27571[#27571] (issue: {issue}26760[#26760]) -* Painless: Fix errors allowing void to be assigned to def. {pull}27460[#27460] (issue: {issue}27210[#27210]) - -Plugin Repository HDFS:: -* Fix SecurityException when HDFS Repository used against HA Namenodes {pull}27196[#27196] - -Plugins:: -* Make sure that we don't detect files as maven coordinate when installing a plugin {pull}28163[#28163] -* Fix upgrading indices which use a custom similarity plugin. {pull}26985[#26985] (issue: {issue}25350[#25350]) - -Recovery:: -* Open engine should keep only starting commit {pull}28228[#28228] (issues: {issue}27804[#27804], {issue}28181[#28181]) -* Allow shrinking of indices from a previous major {pull}28076[#28076] (issue: {issue}28061[#28061]) -* Set global checkpoint before open engine from store {pull}27972[#27972] (issues: {issue}27965[#27965], {issue}27970[#27970]) -* Check and repair index under the store metadata lock {pull}27768[#27768] (issues: {issue}24481[#24481], {issue}24787[#24787], {issue}27731[#27731]) -* Flush old indices on primary promotion and relocation {pull}27580[#27580] (issue: {issue}27536[#27536]) - -Rollover:: -* Make index rollover action atomic {pull}28039[#28039] (issue: {issue}26976[#26976]) - -Scripting:: -* Ensure we protect Collections obtained from scripts from self-referencing {pull}28335[#28335] - -Scroll:: -* Reject scroll query if size is 0 (#22552) {pull}27842[#27842] (issue: {issue}22552[#22552]) -* Fix scroll query with a sort that is a prefix of the index sort {pull}27498[#27498] - -Search:: -* Fix simple_query_string on invalid input {pull}28219[#28219] (issue: {issue}28204[#28204]) -* Use the underlying connection version for CCS connections {pull}28093[#28093] -* Fix synonym phrase query expansion for cross_fields parsing {pull}28045[#28045] -* Carry forward weights, etc on rescore rewrite {pull}27981[#27981] (issue: {issue}27979[#27979]) -* Fix routing with leading or trailing whitespace {pull}27712[#27712] (issue: {issue}27708[#27708]) - -Sequence IDs:: -* Recovery from snapshot may leave seq# gaps {pull}27850[#27850] -* No longer unidle shard during recovery {pull}27757[#27757] (issue: {issue}26591[#26591]) -* Obey translog durability in global checkpoint sync {pull}27641[#27641] - -Settings:: -* Settings: Introduce settings updater for a list of 
settings {pull}28338[#28338] (issue: {issue}28047[#28047]) -* Fix setting notification for complex setting (affixMap settings) that could cause transient settings to be ignored {pull}28317[#28317] (issue: {issue}28316[#28316]) -* Fix environment variable substitutions in list setting {pull}28106[#28106] (issue: {issue}27926[#27926]) -* Allow index settings to be reset by wildcards {pull}27671[#27671] (issue: {issue}27537[#27537]) - -Snapshot/Restore:: -* Consistent updates of IndexShardSnapshotStatus {pull}28130[#28130] (issue: {issue}26480[#26480]) -* Avoid concurrent snapshot finalizations when deleting an INIT snapshot {pull}28078[#28078] (issues: {issue}27214[#27214], {issue}27931[#27931], {issue}27974[#27974]) -* Do not start snapshots that are deleted during initialization {pull}27931[#27931] -* Do not swallow exception in ChecksumBlobStoreFormat.writeAtomic() {pull}27597[#27597] -* Consistent update of stage and failure message in IndexShardSnapshotStatus {pull}27557[#27557] (issue: {issue}26480[#26480]) -* Fail restore when the shard allocations max retries count is reached {pull}27493[#27493] (issue: {issue}26865[#26865]) -* Delete shard store files before restoring a snapshot {pull}27476[#27476] (issues: {issue}20220[#20220], {issue}26865[#26865]) - -Stats:: -* Fixes DocStats to properly deal with shards that report -1 index size {pull}27863[#27863] -* Include internal refreshes in refresh stats {pull}27615[#27615] - -Term Vectors:: -* Fix term vectors generator with keyword and normalizer {pull}27608[#27608] (issue: {issue}27320[#27320]) - - - -[[upgrade-6.2.0]] -[float] -=== Upgrades - -Core:: -* Dependencies: Update joda time to 2.9.9 {pull}28261[#28261] -* upgrade to lucene 7.2.1 {pull}28218[#28218] (issue: {issue}28044[#28044]) -* Upgrade jna from 4.4.0-1 to 4.5.1 {pull}28183[#28183] (issue: {issue}28172[#28172]) - -Ingest:: -* update ingest-attachment to use Tika 1.17 and newer deps {pull}27824[#27824] diff --git a/docs/reference/release-notes/6.2.1.asciidoc b/docs/reference/release-notes/6.2.1.asciidoc deleted file mode 100644 index 1c885554d2260..0000000000000 --- a/docs/reference/release-notes/6.2.1.asciidoc +++ /dev/null @@ -1,13 +0,0 @@ -[[release-notes-6.2.1]] -== 6.2.1 Release Notes - -[[bug-6.2.1]] -[float] -=== Bug fixes - -Plugin Lang Painless:: -* Painless: Fix For Loop NullPointerException {pull}28506[#28506] (issue: {issue}28501[#28501]) - -Plugins:: -* Fix the ability to remove old plugin {pull}28540[#28540] (issue: {issue}28538[#28538]) - diff --git a/docs/reference/release-notes/6.2.2.asciidoc b/docs/reference/release-notes/6.2.2.asciidoc deleted file mode 100644 index 526432bb188e6..0000000000000 --- a/docs/reference/release-notes/6.2.2.asciidoc +++ /dev/null @@ -1,40 +0,0 @@ -[[release-notes-6.2.2]] -== 6.2.2 Release Notes - -Also see <>. 
- -[[enhancement-6.2.2]] -[float] -=== Enhancements - -Recovery:: -* Synced-flush should not seal index of out of sync replicas {pull}28464[#28464] (issue: {issue}10032[#10032]) - - - -[[bug-6.2.2]] -[float] -=== Bug fixes - -Core:: -* Handle throws on tasks submitted to thread pools {pull}28667[#28667] -* Fix size blocking queue to not lie about its weight {pull}28557[#28557] (issue: {issue}28547[#28547]) - -Ingest:: -* Guard accessDeclaredMembers for Tika on JDK 10 {pull}28603[#28603] (issue: {issue}28602[#28602]) -* Fix for bug that prevents pipelines to load that use stored scripts after a restart {pull}28588[#28588] - -Java High Level REST Client:: -* Fix parsing of script fields {pull}28395[#28395] (issue: {issue}28380[#28380]) -* Move to POST when calling API to retrieve which support request body {pull}28342[#28342] (issue: {issue}28326[#28326]) - -Packaging:: -* Fix using relative custom config path {pull}28700[#28700] (issue: {issue}27610[#27610]) -* Disable console logging in the Windows service {pull}28618[#28618] (issue: {issue}20422[#20422]) - -Percolator:: -* Do not take duplicate query extractions into account for minimum_should_match attribute {pull}28353[#28353] (issue: {issue}28315[#28315]) - -Recovery:: -* Fsync directory after cleanup {pull}28604[#28604] (issue: {issue}28435[#28435]) - diff --git a/docs/reference/release-notes/6.2.3.asciidoc b/docs/reference/release-notes/6.2.3.asciidoc deleted file mode 100644 index a84b5d64ab7b2..0000000000000 --- a/docs/reference/release-notes/6.2.3.asciidoc +++ /dev/null @@ -1,48 +0,0 @@ -[[release-notes-6.2.3]] -== 6.2.3 Release Notes - -Also see <>. - -[[enhancement-6.2.3]] -[float] -=== Enhancements - -Highlighting:: -* Limit analyzed text for highlighting (improvements) {pull}28808[#28808] (issues: {issue}16764[#16764], {issue}27934[#27934]) - -Recovery:: -* Require translogUUID when reading global checkpoint {pull}28587[#28587] (issue: {issue}28435[#28435]) - -[[bug-6.2.3]] -[float] -=== Bug fixes - -Core:: -* Remove special handling for _all in nodes info {pull}28971[#28971] (issue: {issue}28797[#28797]) - -Engine:: -* Avoid class cast exception from index writer {pull}28989[#28989] -* Maybe die before failing engine {pull}28973[#28973] (issues: {issue}27265[#27265], {issue}28967[#28967]) -* Never block on key in `LiveVersionMap#pruneTombstones` {pull}28736[#28736] (issue: {issue}28714[#28714]) - -Ingest:: -* Continue registering pipelines after one pipeline parse failure. {pull}28752[#28752] (issue: {issue}28269[#28269]) - -Java High Level REST Client:: -* REST high-level client: encode path parts {pull}28663[#28663] (issue: {issue}28625[#28625]) - -Packaging:: -* Delay path expansion on Windows {pull}28753[#28753] (issues: {issue}27675[#27675], {issue}28748[#28748]) - -Percolator:: -* Fix percolator query analysis for function_score query {pull}28854[#28854] -* Improved percolator's random candidate query duel test {pull}28840[#28840] - -Snapshot/Restore:: -* Fix NPE when using deprecated Azure settings {pull}28769[#28769] (issues: {issue}23518[#23518], {issue}28299[#28299]) - -Stats:: -* Fix AdaptiveSelectionStats serialization bug {pull}28718[#28718] (issue: {issue}28713[#28713]) - - - diff --git a/docs/reference/release-notes/6.2.4.asciidoc b/docs/reference/release-notes/6.2.4.asciidoc deleted file mode 100644 index 38d796a00d63f..0000000000000 --- a/docs/reference/release-notes/6.2.4.asciidoc +++ /dev/null @@ -1,41 +0,0 @@ -[[release-notes-6.2.4]] -== 6.2.4 Release Notes - -Also see <>. 
-
-[[bug-6.2.4]]
-[float]
-=== Bug fixes
-
-Engine::
-* Harden periodically check to avoid endless flush loop {pull}29125[#29125] (issues: {issue}28350[#28350], {issue}29097[#29097])
-
-Ingest::
-* Don't allow referencing the pattern bank name in the pattern bank {pull}29295[#29295] (issue: {issue}29257[#29257])
-
-Java High Level REST Client::
-* Bulk processor#awaitClose to close scheduler {pull}29263[#29263]
-
-Java Low Level REST Client::
-* REST client: hosts marked dead for the first time should not be immediately retried {pull}29230[#29230]
-
-Network::
-* Cross-cluster search and default connections can get crossed [OPEN] [ISSUE] {pull}29321[#29321]
-
-Percolator::
-* Fixed bug when non percolator docs end up in the search hits {pull}29447[#29447] (issue: {issue}29429[#29429])
-* Fixed a msm accounting error that can occur during analyzing a percolator query {pull}29415[#29415] (issue: {issue}29393[#29393])
-* Fix more query extraction bugs. {pull}29388[#29388] (issues: {issue}28353[#28353], {issue}29376[#29376])
-* Fix some query extraction bugs. {pull}29283[#29283]
-
-Plugins::
-* Plugins: Fix native controller confirmation for non-meta plugin {pull}29434[#29434]
-
-Search::
-* Propagate ignore_unmapped to inner_hits {pull}29261[#29261] (issue: {issue}29071[#29071])
-
-Settings::
-* Archive unknown or invalid settings on updates {pull}28888[#28888] (issue: {issue}28609[#28609])
-
-
-
diff --git a/docs/reference/setup/install.asciidoc b/docs/reference/setup/install.asciidoc
index 49709ba1d6dc1..44c806be1951b 100644
--- a/docs/reference/setup/install.asciidoc
+++ b/docs/reference/setup/install.asciidoc
@@ -38,8 +38,7 @@ Elasticsearch on Windows. MSIs may be
 downloaded from the Elasticsearch website.
 
 `docker`::
 
 Images are available for running Elasticsearch as Docker containers. They may be
-downloaded from the Elastic Docker Registry. The default image ships with
-{xpack-ref}/index.html[X-Pack] pre-installed.
+downloaded from the Elastic Docker Registry.
 +
 {ref}/docker.html[Install {es} with Docker]
 
diff --git a/docs/reference/setup/install/deb.asciidoc b/docs/reference/setup/install/deb.asciidoc
index d055f1251e19d..af9d35f3f169d 100644
--- a/docs/reference/setup/install/deb.asciidoc
+++ b/docs/reference/setup/install/deb.asciidoc
@@ -2,9 +2,11 @@
 === Install Elasticsearch with Debian Package
 
 The Debian package for Elasticsearch can be <>
-or from our <>. It can be used to install
+or from our <>. It can be used to install
 Elasticsearch on any Debian-based system such as Debian and Ubuntu.
 
+include::license.asciidoc[]
+
 The latest stable version of Elasticsearch can be found on the
 link:/downloads/elasticsearch[Download Elasticsearch] page. Other versions can
 be found on the link:/downloads/past-releases[Past Releases page].
@@ -125,6 +127,10 @@ sudo dpkg -i elasticsearch-{version}.deb
 --------------------------------------------
 <1> Compares the SHA of the downloaded Debian package and the published checksum, which should
     output `elasticsearch-{version}.deb: OK`.
+
+Alternatively, you can download the following package, which contains only
+features that are available under the Apache 2.0 license:
+https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}.deb
 
 endif::[]
 
diff --git a/docs/reference/setup/install/license.asciidoc b/docs/reference/setup/install/license.asciidoc
new file mode 100644
index 0000000000000..0e2701a65a401
--- /dev/null
+++ b/docs/reference/setup/install/license.asciidoc
@@ -0,0 +1,6 @@
+This package is free to use under the Elastic license. It contains open source
+and free commercial features and access to paid commercial features.
+{stack-ov}/license-management.html[Start a 30-day trial] to try out all of the
+paid commercial features. See the
+https://www.elastic.co/subscriptions[Subscriptions] page for information about
+Elastic license levels.
diff --git a/docs/reference/setup/install/rpm.asciidoc b/docs/reference/setup/install/rpm.asciidoc
index 730f043341773..a44b0b37d31ec 100644
--- a/docs/reference/setup/install/rpm.asciidoc
+++ b/docs/reference/setup/install/rpm.asciidoc
@@ -9,6 +9,8 @@ and Oracle Enterprise.
 
 NOTE: RPM install is not supported on distributions with old versions of RPM,
 such as SLES 11 and CentOS 5. Please see <> instead.
 
+include::license.asciidoc[]
+
 The latest stable version of Elasticsearch can be found on the
 link:/downloads/elasticsearch[Download Elasticsearch] page. Other versions can
 be found on the link:/downloads/past-releases[Past Releases page].
@@ -110,6 +112,10 @@ sudo rpm --install elasticsearch-{version}.rpm
 --------------------------------------------
 <1> Compares the SHA of the downloaded RPM and the published checksum, which should
     output `elasticsearch-{version}.rpm: OK`.
+
+Alternatively, you can download the following package, which contains only
+features that are available under the Apache 2.0 license:
+https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}.rpm
 
 endif::[]
 
diff --git a/docs/reference/setup/install/windows.asciidoc b/docs/reference/setup/install/windows.asciidoc
index 5d79e9669f9f0..c48ec5de22d2c 100644
--- a/docs/reference/setup/install/windows.asciidoc
+++ b/docs/reference/setup/install/windows.asciidoc
@@ -10,6 +10,8 @@ the included `elasticsearch.exe` executable.
 
 TIP: Elasticsearch has historically been installed on Windows using the <> archive.
 You can continue using the `.zip` approach if you prefer.
 
+include::license.asciidoc[]
+
 The latest stable version of Elasticsearch can be found on the
 link:/downloads/elasticsearch[Download Elasticsearch] page.
 Other versions can be found on the
@@ -32,6 +34,10 @@ ifeval::["{release-state}"!="unreleased"]
 
 Download the `.msi` package for Elasticsearch v{version} from
 https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.msi
 
+Alternatively, you can download the following package, which contains only
+features that are available under the Apache 2.0 license:
+https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}.msi
+
 endif::[]
 
 [[install-msi-gui]]
diff --git a/docs/reference/setup/install/zip-targz.asciidoc b/docs/reference/setup/install/zip-targz.asciidoc
index 18cf0a6506fe4..94de390656b1e 100644
--- a/docs/reference/setup/install/zip-targz.asciidoc
+++ b/docs/reference/setup/install/zip-targz.asciidoc
@@ -5,6 +5,8 @@
 Elasticsearch is provided as a `.zip` and as a `.tar.gz` package. These
 packages can be used to install Elasticsearch on any system and are the
 easiest package format to use when trying out Elasticsearch.
+include::license.asciidoc[]
+
 The latest stable version of Elasticsearch can be found on the
 link:/downloads/elasticsearch[Download Elasticsearch] page.
 Other versions can be found on the
@@ -40,6 +42,10 @@ cd elasticsearch-{version}/ <2>
 `elasticsearch-{version}.zip: OK`.
 <2> This directory is known as `$ES_HOME`.
 
+Alternatively, you can download the following package, which contains only
+features that are available under the Apache 2.0 license:
+https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}.zip
+
 endif::[]
 
@@ -68,6 +74,10 @@ cd elasticsearch-{version}/ <2>
 `elasticsearch-{version}.tar.gz: OK`.
 <2> This directory is known as `$ES_HOME`.
 
+Alternatively, you can download the following package, which includes only
+Apache 2.0 licensed code:
+https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}.tar.gz
+
 endif::[]
 
 ifdef::include-xpack[]
diff --git a/docs/reference/setup/install/zip-windows.asciidoc b/docs/reference/setup/install/zip-windows.asciidoc
index 3ebf4f3d77dc3..18c1272d25db5 100644
--- a/docs/reference/setup/install/zip-windows.asciidoc
+++ b/docs/reference/setup/install/zip-windows.asciidoc
@@ -9,6 +9,8 @@ TIP: Elasticsearch has historically been installed on Windows using the `.zip` a
 An <> is available that provides the easiest getting started
 experience for Windows. You can continue using the `.zip` approach if you prefer.
 
+include::license.asciidoc[]
+
 The latest stable version of Elasticsearch can be found on the
 link:/downloads/elasticsearch[Download Elasticsearch] page.
 Other versions can be found on the
@@ -31,6 +33,10 @@ ifeval::["{release-state}"!="unreleased"]
 
 Download the `.zip` archive for Elasticsearch v{version} from:
 https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.zip
 
+Alternatively, you can download the following package, which contains only
+features that are available under the Apache 2.0 license:
+https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}.zip
+
 Unzip it with your favourite unzip tool. This will create a folder called
 +elasticsearch-{version}+, which we will refer to as `%ES_HOME%`. In a terminal
 window, `cd` to the `%ES_HOME%` directory, for instance:
diff --git a/docs/reference/upgrade/cluster_restart.asciidoc b/docs/reference/upgrade/cluster_restart.asciidoc
index ca5233011fafc..b58ac48bef968 100644
--- a/docs/reference/upgrade/cluster_restart.asciidoc
+++ b/docs/reference/upgrade/cluster_restart.asciidoc
@@ -91,25 +91,20 @@ already have local shard copies.
 +
 --
 When all nodes have joined the cluster and recovered their primary shards,
-reenable allocation.
+reenable allocation by restoring `cluster.routing.allocation.enable` to its
+default:
 
 [source,js]
 ------------------------------------------------------
 PUT _cluster/settings
 {
-  "transient": {
-    "cluster.routing.allocation.enable": "all"
+  "persistent": {
+    "cluster.routing.allocation.enable": null
   }
 }
 ------------------------------------------------------
 // CONSOLE
 
-NOTE: Because <<_precedence_of_settings, transient
-settings take precedence over persistent settings>>, this overrides the
-persistent setting used to disable shard allocation in the first step. If you
-don't explicitly reenable shard allocation after a full cluster restart, the
-persistent setting is used and shard allocation remains disabled.
-
 Once allocation is reenabled, the cluster starts allocating replica shards to
 the data nodes.
At this point it is safe to resume indexing and searching,
 but your cluster will recover more quickly if you can wait until all primary
diff --git a/docs/reference/upgrade/rolling_upgrade.asciidoc b/docs/reference/upgrade/rolling_upgrade.asciidoc
index 5af521303175c..76a10f752bec7 100644
--- a/docs/reference/upgrade/rolling_upgrade.asciidoc
+++ b/docs/reference/upgrade/rolling_upgrade.asciidoc
@@ -72,21 +72,15 @@ GET _cat/nodes
 +
 --
-NOTE: Because <<_precedence_of_settings, transient
-settings take precedence over persistent settings>>, this overrides the
-persistent setting used to disable shard allocation in the first step. If you
-don't explicitly reenable shard allocation after a full cluster restart, the
-persistent setting is used and shard allocation remains disabled.
-
-Once the node has joined the cluster, reenable shard allocation to start using
-the node:
+Once the node has joined the cluster, remove the `cluster.routing.allocation.enable`
+setting to enable shard allocation and start using the node:
 
 [source,js]
 --------------------------------------------------
 PUT _cluster/settings
 {
-  "transient": {
-    "cluster.routing.allocation.enable": "all"
+  "persistent": {
+    "cluster.routing.allocation.enable": null
   }
 }
 --------------------------------------------------
diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Booleans.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Booleans.java
index 7447f0111f7e2..8687785796508 100644
--- a/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Booleans.java
+++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Booleans.java
@@ -107,7 +107,7 @@ public static Boolean parseBoolean(String value, Boolean defaultValue) {
     }
 
     /**
-     * Returns false if text is in false, 0, off, no; else, true
+     * Returns {@code false} if text is in "false", "0", "off", "no"; else, {@code true}.
      *
      * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #parseBoolean(String, Boolean)} instead.
      */
@@ -119,9 +119,7 @@ public static Boolean parseBooleanLenient(String value, Boolean defaultValue) {
         return parseBooleanLenient(value, false);
     }
 
     /**
-     * Returns true iff the value is neither of the following:
-     *   false, 0, off, no
-     *   otherwise false
+     * Returns {@code false} if text is in "false", "0", "off", "no"; else, {@code true}.
      *
     * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #parseBoolean(String, boolean)} instead.
      */
@@ -134,21 +132,21 @@ public static boolean parseBooleanLenient(String value, boolean defaultValue) {
     }
 
     /**
-     * @return true iff the value is false, otherwise false.
+     * @return {@code true} iff the value is "false", otherwise {@code false}.
      */
     public static boolean isFalse(String value) {
         return "false".equals(value);
     }
 
     /**
-     * @return true iff the value is true, otherwise false
+     * @return {@code true} iff the value is "true", otherwise {@code false}.
      */
     public static boolean isTrue(String value) {
         return "true".equals(value);
     }
 
     /**
-     * Returns false if text is in false, 0, off, no; else, true
+     * Returns {@code false} if text is in "false", "0", "off", "no"; else, {@code true}.
      *
      * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #parseBoolean(char[], int, int, boolean)} instead
      */
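The javadoc cleanups above document subtle semantics: the deprecated lenient parsers treat only `"false"`, `"0"`, `"off"` and `"no"` as false, while `isTrue`/`isFalse` match the literal strings only. A minimal sketch of those semantics (the `BooleansDemo` wrapper class and the sample inputs are illustrative assumptions, not part of this change):

[source,java]
----
import org.elasticsearch.common.Booleans;

public class BooleansDemo {
    public static void main(String[] args) {
        // Lenient (pre-6.0) parsing: only "false", "0", "off", "no" parse as false...
        System.out.println(Booleans.parseBooleanLenient("off", false));     // false
        System.out.println(Booleans.parseBooleanLenient("enabled", false)); // true: any other text is true
        // ...while the strict helpers compare against the literal strings only.
        System.out.println(Booleans.isTrue("true")); // true
        System.out.println(Booleans.isFalse("no"));  // false: isFalse() only matches "false"
    }
}
----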
diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java
index 4108992fb1f59..67663516167d5 100644
--- a/libs/elasticsearch-core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java
+++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java
@@ -37,11 +37,11 @@ public final class IOUtils {
 
     private IOUtils() {
-
+        // Static utils methods
     }
 
     /**
-     * Closes all given Closeables. Some of the Closeables may be null; they are
+     * Closes all given {@link Closeable}s. Some of the {@linkplain Closeable}s may be null; they are
      * ignored. After everything is closed, the method either throws the first exception it hit
      * while closing with other exceptions added as suppressed, or completes normally if there were
      * no exceptions.
@@ -53,7 +53,7 @@ public static void close(final Closeable... objects) throws IOException {
     }
 
     /**
-     * Closes all given Closeables. Some of the Closeables may be null; they are
+     * Closes all given {@link Closeable}s. Some of the {@linkplain Closeable}s may be null; they are
      * ignored. After everything is closed, the method adds any exceptions as suppressed to the
      * original exception, or throws the first exception it hit if {@code Exception} is null. If
      * no exceptions are encountered and the passed in exception is null, it completes normally.
@@ -65,7 +65,7 @@ public static void close(final Exception e, final Closeable... objects) throws IOException {
     }
 
     /**
-     * Closes all given Closeables. Some of the Closeables may be null; they are
+     * Closes all given {@link Closeable}s. Some of the {@linkplain Closeable}s may be null; they are
      * ignored. After everything is closed, the method either throws the first exception it hit
      * while closing with other exceptions added as suppressed, or completes normally if there were
      * no exceptions.
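As a usage note for the `close` contract documented above: the first exception wins, later ones are attached as suppressed, and `null` elements are skipped. A small hypothetical sketch (the lambda `Closeable`s and the `CloseDemo` class are invented for illustration):

[source,java]
----
import org.elasticsearch.core.internal.io.IOUtils;

import java.io.Closeable;
import java.io.IOException;

public class CloseDemo {
    public static void main(String[] args) {
        Closeable first = () -> { throw new IOException("first"); };
        Closeable second = () -> { throw new IOException("second"); };
        try {
            // null elements are ignored; "first" is thrown, "second" is suppressed
            IOUtils.close(first, null, second);
        } catch (IOException e) {
            System.out.println(e.getMessage());                    // first
            System.out.println(e.getSuppressed()[0].getMessage()); // second
        }
    }
}
----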
*/ public class HasChildQueryBuilder extends AbstractQueryBuilder { public static final String NAME = "has_child"; diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java index 3019532779800..13926d7d362ff 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java @@ -41,7 +41,7 @@ /** * Metric implementing Discounted Cumulative Gain. - * The `normalize` parameter can be set to calculate the normalized NDCG (set to false by default).
+ * The `normalize` parameter can be set to calculate NDCG, the normalized DCG (set to {@code false} by default).
* The optional `unknown_doc_rating` parameter can be used to specify a default rating for unlabeled documents. * @see Discounted Cumulative Gain
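 * <p>(Illustrative, textbook definition rather than text lifted from this class:
 * DCG@k = &sum; (2^rel_i - 1) / log2(i + 1) over i = 1..k, and the normalized form is
 * NDCG@k = DCG@k / IDCG@k, where IDCG@k is the DCG of the ideal ordering of the same ratings.)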
*/ diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java index 8f17c8203b7e8..392ce5d0633a0 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java @@ -192,7 +192,7 @@ public Map getParams() { return Collections.unmodifiableMap(this.params); } - /** return the parameters if this request uses a template, otherwise this will be null. */ + /** return the parameters if this request uses a template, otherwise this will be {@code null}. */ public String getTemplateId() { return this.templateId; } diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index 479fe78cc8071..a153cc555c81d 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -17,6 +17,10 @@ * under the License. */ +import org.apache.tools.ant.taskdefs.condition.Os + +import static org.elasticsearch.gradle.BuildPlugin.getJavaHome + apply plugin: 'elasticsearch.test-with-dependencies' esplugin { @@ -60,3 +64,64 @@ thirdPartyAudit.excludes = [ 'org.apache.log.Hierarchy', 'org.apache.log.Logger', ] + +// Support for testing reindex-from-remote against old Elaticsearch versions +configurations { + oldesFixture + es2 + es1 + es090 +} + +dependencies { + oldesFixture project(':test:fixtures:old-elasticsearch') + /* Right now we just test against the latest version of each major we expect + * reindex-from-remote to work against. We could randomize the versions but + * that doesn't seem worth it at this point. */ + es2 'org.elasticsearch.distribution.zip:elasticsearch:2.4.5@zip' + es1 'org.elasticsearch:elasticsearch:1.7.6@zip' + es090 'org.elasticsearch:elasticsearch:0.90.13@zip' +} + +if (Os.isFamily(Os.FAMILY_WINDOWS)) { + logger.warn("Disabling reindex-from-old tests because we can't get the pid file on windows") + integTestRunner.systemProperty "tests.fromOld", "false" +} else if (rootProject.rootDir.toString().contains(" ")) { + logger.warn("Disabling reindex-from-old tests because Elasticsearch 1.7 won't start with spaces in the path") + integTestRunner.systemProperty "tests.fromOld", "false" +} else { + integTestRunner.systemProperty "tests.fromOld", "true" + /* Set up tasks to unzip and run the old versions of ES before running the + * integration tests. */ + for (String version : ['2', '1', '090']) { + Task unzip = task("unzipEs${version}", type: Sync) { + Configuration oldEsDependency = configurations['es' + version] + dependsOn oldEsDependency + /* Use a closure here to delay resolution of the dependency until we need + * it */ + from { + oldEsDependency.collect { zipTree(it) } + } + into temporaryDir + } + Task fixture = task("oldEs${version}Fixture", + type: org.elasticsearch.gradle.test.AntFixture) { + dependsOn project.configurations.oldesFixture + dependsOn unzip + executable = new File(project.runtimeJavaHome, 'bin/java') + env 'CLASSPATH', "${ -> project.configurations.oldesFixture.asPath }" + env 'JAVA_HOME', getJavaHome(it, 7) + args 'oldes.OldElasticsearch', + baseDir, + unzip.temporaryDir, + version == '090' + } + integTest.dependsOn fixture + integTestRunner { + /* Use a closure on the string to delay evaluation until right before we + * run the integration tests so that we can be sure that the file is + * ready. 
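+     * (Illustrative aside, not part of the original change: a GString such as
+     * "${ -> fixture.addressAndPort }" invokes its closure each time the string is
+     * rendered, so the fixture's address is read at test execution time, after the
+     * fixture has started, rather than at Gradle configuration time.)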
*/ + systemProperty "es${version}.port", "${ -> fixture.addressAndPort }" + } + } +} diff --git a/qa/reindex-from-old/src/test/java/org/elasticsearch/smoketest/ReindexFromOldRemoteIT.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/ReindexFromOldRemoteIT.java similarity index 95% rename from qa/reindex-from-old/src/test/java/org/elasticsearch/smoketest/ReindexFromOldRemoteIT.java rename to modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/ReindexFromOldRemoteIT.java index 459aff3439710..5d359053a6668 100644 --- a/qa/reindex-from-old/src/test/java/org/elasticsearch/smoketest/ReindexFromOldRemoteIT.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/ReindexFromOldRemoteIT.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.smoketest; +package org.elasticsearch.index.reindex.remote; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; @@ -27,6 +27,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.Booleans; import org.elasticsearch.test.rest.ESRestTestCase; import java.io.IOException; @@ -38,6 +39,9 @@ public class ReindexFromOldRemoteIT extends ESRestTestCase { private void oldEsTestCase(String portPropertyName, String requestsPerSecond) throws IOException { + boolean enabled = Booleans.parseBoolean(System.getProperty("tests.fromOld")); + assumeTrue("test is disabled, probably because this is windows", enabled); + int oldEsPort = Integer.parseInt(System.getProperty(portPropertyName)); try (RestClient oldEs = RestClient.builder(new HttpHost("127.0.0.1", oldEsPort)).build()) { try { diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuCollationTokenFilterFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuCollationTokenFilterFactory.java index e840017eb81c2..ed172ef669dae 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuCollationTokenFilterFactory.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuCollationTokenFilterFactory.java @@ -36,12 +36,12 @@ /** * An ICU based collation token filter. There are two ways to configure collation: - *

<p>The first is simply specifying the locale (defaults to the default locale). The language
- * parameter is the lowercase two-letter ISO-639 code. An additional country and variant
+ * <p>The first is simply specifying the locale (defaults to the default locale). The {@code language}
+ * parameter is the lowercase two-letter ISO-639 code. An additional {@code country} and {@code variant}
 * can be provided.
 * <p>The second option is to specify collation rules as defined in the
- * Collation customization chapter in icu docs. The rules parameter can either embed the rules definition
- * in the settings or refer to an external location (preferable located under the config location, relative to it).
+ * Collation customization chapter in icu docs. The {@code rules} parameter can either embed the rules definition
+ * in the settings or refer to an external location (preferably located under the {@code config} location, relative to it).
 */
public class IcuCollationTokenFilterFactory extends AbstractTokenFilterFactory {
diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuFoldingTokenFilterFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuFoldingTokenFilterFactory.java
index 60ab831e6f1f4..6505e1db0f889 100644
--- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuFoldingTokenFilterFactory.java
+++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuFoldingTokenFilterFactory.java
@@ -35,7 +35,7 @@
 * Can be filtered to handle certain characters in a specified way (see http://icu-project.org/apiref/icu4j/com/ibm/icu/text/UnicodeSet.html)
 * E.g national chars that should be retained (filter : "[^åäöÅÄÖ]").
 *
- * <p>The unicodeSetFilter attribute can be used to provide the UniCodeSet for filtering.
+ * <p>
The {@code unicodeSetFilter} attribute can be used to provide the UniCodeSet for filtering. * * @author kimchy (shay.banon) */ diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerCharFilterFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerCharFilterFactory.java index 3046d6839b9e6..e43e163e1a034 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerCharFilterFactory.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerCharFilterFactory.java @@ -32,9 +32,9 @@ /** * Uses the {@link org.apache.lucene.analysis.icu.ICUNormalizer2CharFilter} to normalize character. - *

<p>The name can be used to provide the type of normalization to perform.</p>
- * <p>The mode can be used to provide 'compose' or 'decompose'. Default is compose.</p>
- * <p>The unicodeSetFilter attribute can be used to provide the UniCodeSet for filtering.</p>
+ * <p>The {@code name} can be used to provide the type of normalization to perform.</p>
+ * <p>The {@code mode} can be used to provide 'compose' or 'decompose'. Default is compose.</p>
+ * <p>The {@code unicodeSetFilter} attribute can be used to provide the UniCodeSet for filtering.</p>
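+ * <p>(Illustrative sketch of a char filter definition using the parameters above;
+ * all values are examples, not defaults:)
+ * <pre>
+ * "char_filter" : {
+ *     "my_icu_normalizer" : {
+ *         "type" : "icu_normalizer",
+ *         "name" : "nfkc_cf",
+ *         "mode" : "compose",
+ *         "unicodeSetFilter" : "[^åäöÅÄÖ]"
+ *     }
+ * }
+ * </pre>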

*/ public class IcuNormalizerCharFilterFactory extends AbstractCharFilterFactory implements MultiTermAwareComponent { diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerTokenFilterFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerTokenFilterFactory.java index 4e8d5d702205d..1ef09f86052bd 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerTokenFilterFactory.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerTokenFilterFactory.java @@ -31,10 +31,8 @@ /** * Uses the {@link org.apache.lucene.analysis.icu.ICUNormalizer2Filter} to normalize tokens. - *

<p>The name can be used to provide the type of normalization to perform.</p>
- * <p>The unicodeSetFilter attribute can be used to provide the UniCodeSet for filtering.</p>
- *
- *
+ * <p>The {@code name} can be used to provide the type of normalization to perform.</p>
+ * <p>The {@code unicodeSetFilter} attribute can be used to provide the UniCodeSet for filtering.</p>
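+ * <p>(Illustrative sketch of the corresponding token filter definition; example values:)
+ * <pre>
+ * "filter" : {
+ *     "my_icu_normalizer" : {
+ *         "type" : "icu_normalizer",
+ *         "name" : "nfkc_cf"
+ *     }
+ * }
+ * </pre>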

*/ public class IcuNormalizerTokenFilterFactory extends AbstractTokenFilterFactory implements MultiTermAwareComponent { diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index ecb63fab6981c..e164a8553f81f 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -1,8 +1,3 @@ -import org.elasticsearch.gradle.test.AntFixture - -import java.security.KeyPair -import java.security.KeyPairGenerator - /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -58,44 +53,7 @@ thirdPartyAudit.excludes = [ 'org.apache.log.Logger', ] -/** A task to start the GoogleCloudStorageFixture which emulates a Google Cloud Storage service **/ -task googleCloudStorageFixture(type: AntFixture) { - dependsOn compileTestJava - env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }" - executable = new File(project.runtimeJavaHome, 'bin/java') - args 'org.elasticsearch.repositories.gcs.GoogleCloudStorageFixture', baseDir, 'bucket_test' -} - -/** A service account file that points to the Google Cloud Storage service emulated by the fixture **/ -File serviceAccountFile = new File(project.buildDir, "generated-resources/service_account_test.json") -task createServiceAccountFile() { - dependsOn googleCloudStorageFixture - doLast { - KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA") - keyPairGenerator.initialize(1024) - KeyPair keyPair = keyPairGenerator.generateKeyPair() - String encodedKey = Base64.getEncoder().encodeToString(keyPair.private.getEncoded()) - - serviceAccountFile.parentFile.mkdirs() - serviceAccountFile.setText("{\n" + - ' "type": "service_account",\n' + - ' "project_id": "integration_test",\n' + - ' "private_key_id": "' + UUID.randomUUID().toString() + '",\n' + - ' "private_key": "-----BEGIN PRIVATE KEY-----\\n' + encodedKey + '\\n-----END PRIVATE KEY-----\\n",\n' + - ' "client_email": "integration_test@appspot.gserviceaccount.com",\n' + - ' "client_id": "123456789101112130594",\n' + - " \"auth_uri\": \"http://${googleCloudStorageFixture.addressAndPort}/o/oauth2/auth\",\n" + - " \"token_uri\": \"http://${googleCloudStorageFixture.addressAndPort}/o/oauth2/token\",\n" + - ' "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",\n' + - ' "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/integration_test%40appspot.gserviceaccount.com"\n' + - '}', 'UTF-8') - } -} - -integTestCluster { - dependsOn createServiceAccountFile, googleCloudStorageFixture - keystoreFile 'gcs.client.integration_test.credentials_file', "${serviceAccountFile.absolutePath}" - - /* Use a closure on the string to delay evaluation until tests are executed */ - setting 'gcs.client.integration_test.endpoint', "http://${ -> googleCloudStorageFixture.addressAndPort }" -} +check { + // also execute the QA tests when testing the plugin + dependsOn 'qa:google-cloud-storage:check' +} \ No newline at end of file diff --git a/plugins/repository-gcs/qa/build.gradle b/plugins/repository-gcs/qa/build.gradle new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/qa/google-cloud-storage/build.gradle b/plugins/repository-gcs/qa/google-cloud-storage/build.gradle new file mode 100644 index 0000000000000..afd49b9f4dc73 --- /dev/null +++ b/plugins/repository-gcs/qa/google-cloud-storage/build.gradle @@ -0,0 +1,115 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import org.elasticsearch.gradle.MavenFilteringHack +import org.elasticsearch.gradle.test.AntFixture + +import java.security.KeyPair +import java.security.KeyPairGenerator + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: ':plugins:repository-gcs', configuration: 'runtime') +} + +integTestCluster { + plugin ':plugins:repository-gcs' +} + +forbiddenApisTest { + // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage + bundledSignatures -= 'jdk-non-portable' + bundledSignatures += 'jdk-internal' +} + +boolean useFixture = false + +String gcsServiceAccount = System.getenv("google_storage_service_account") +String gcsBucket = System.getenv("google_storage_bucket") +String gcsBasePath = System.getenv("google_storage_base_path") + +File serviceAccountFile = null +if (!gcsServiceAccount && !gcsBucket && !gcsBasePath) { + serviceAccountFile = new File(project.buildDir, 'generated-resources/service_account_test.json') + gcsBucket = 'bucket_test' + gcsBasePath = 'integration_test' + useFixture = true +} else { + serviceAccountFile = new File(gcsServiceAccount) + if (serviceAccountFile.exists() == false || serviceAccountFile.canRead() == false) { + throw new FileNotFoundException(gcsServiceAccount, "Google Storage service account file does not exist or is not readable") + } +} + +/** A task to start the GoogleCloudStorageFixture which emulates a Google Cloud Storage service **/ +task googleCloudStorageFixture(type: AntFixture) { + dependsOn compileTestJava + env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }" + executable = new File(project.runtimeJavaHome, 'bin/java') + args 'org.elasticsearch.repositories.gcs.GoogleCloudStorageFixture', baseDir, 'bucket_test' +} + +/** A service account file that points to the Google Cloud Storage service emulated by the fixture **/ +task createServiceAccountFile() { + dependsOn googleCloudStorageFixture + doLast { + KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA") + keyPairGenerator.initialize(1024) + KeyPair keyPair = keyPairGenerator.generateKeyPair() + String encodedKey = Base64.getEncoder().encodeToString(keyPair.private.getEncoded()) + + serviceAccountFile.parentFile.mkdirs() + serviceAccountFile.setText("{\n" + + ' "type": "service_account",\n' + + ' "project_id": "integration_test",\n' + + ' "private_key_id": "' + UUID.randomUUID().toString() + '",\n' + + ' "private_key": "-----BEGIN PRIVATE KEY-----\\n' + encodedKey + '\\n-----END PRIVATE KEY-----\\n",\n' + + ' "client_email": "integration_test@appspot.gserviceaccount.com",\n' + + ' "client_id": "123456789101112130594",\n' + + " \"auth_uri\": \"http://${googleCloudStorageFixture.addressAndPort}/o/oauth2/auth\",\n" + + " 
\"token_uri\": \"http://${googleCloudStorageFixture.addressAndPort}/o/oauth2/token\",\n" + + ' "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",\n' + + ' "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/integration_test%40appspot.gserviceaccount.com"\n' + + '}', 'UTF-8') + } +} + +Map expansions = [ + 'bucket': gcsBucket, + 'base_path': gcsBasePath +] + +processTestResources { + inputs.properties(expansions) + MavenFilteringHack.filter(it, expansions) +} + +integTestCluster { + keystoreFile 'gcs.client.integration_test.credentials_file', "${serviceAccountFile.absolutePath}" + + if (useFixture) { + dependsOn createServiceAccountFile, googleCloudStorageFixture + /* Use a closure on the string to delay evaluation until tests are executed */ + setting 'gcs.client.integration_test.endpoint', "http://${ -> googleCloudStorageFixture.addressAndPort }" + } else { + println "Using an external service to test the repository-gcs plugin" + } +} diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java b/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java similarity index 100% rename from plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java rename to plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java diff --git a/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryClientYamlTestSuiteIT.java b/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..8d85b38919380 --- /dev/null +++ b/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryClientYamlTestSuiteIT.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.repositories.gcs; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; + +public class GoogleCloudStorageRepositoryClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + + public GoogleCloudStorageRepositoryClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return createParameters(); + } +} diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java b/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java similarity index 100% rename from plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java rename to plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java diff --git a/plugins/repository-gcs/qa/google-cloud-storage/src/test/resources/rest-api-spec/test/repository_gcs/10_repository.yml b/plugins/repository-gcs/qa/google-cloud-storage/src/test/resources/rest-api-spec/test/repository_gcs/10_repository.yml new file mode 100644 index 0000000000000..65d02b5fadefc --- /dev/null +++ b/plugins/repository-gcs/qa/google-cloud-storage/src/test/resources/rest-api-spec/test/repository_gcs/10_repository.yml @@ -0,0 +1,177 @@ +# Integration tests for repository-gcs +--- +"Snapshot/Restore with repository-gcs": + + # Register repository + - do: + snapshot.create_repository: + repository: repository + body: + type: gcs + settings: + bucket: ${bucket} + client: "integration_test" + base_path: ${base_path} + + - match: { acknowledged: true } + + # Get repository + - do: + snapshot.get_repository: + repository: repository + + - match: { repository.settings.bucket : ${bucket} } + - match: { repository.settings.client : "integration_test" } + - match: { repository.settings.base_path : ${base_path} } + + # Index documents + - do: + bulk: + refresh: true + body: + - index: + _index: docs + _type: doc + _id: 1 + - snapshot: one + - index: + _index: docs + _type: doc + _id: 2 + - snapshot: one + - index: + _index: docs + _type: doc + _id: 3 + - snapshot: one + + - do: + count: + index: docs + + - match: {count: 3} + + # Create a first snapshot + - do: + snapshot.create: + repository: repository + snapshot: snapshot-one + wait_for_completion: true + + - match: { snapshot.snapshot: snapshot-one } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.include_global_state: true } + - match: { snapshot.shards.failed : 0 } + + - do: + snapshot.status: + repository: repository + snapshot: snapshot-one + + - is_true: snapshots + - match: { snapshots.0.snapshot: snapshot-one } + - match: { snapshots.0.state : SUCCESS } + + # Index more documents + - do: + bulk: + refresh: true + body: + - index: + _index: docs + _type: doc + _id: 4 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 5 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 6 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 7 + - snapshot: two + + - do: + count: + index: docs + + - match: {count: 7} + + # Create a second snapshot + - do: + snapshot.create: + repository: 
repository + snapshot: snapshot-two + wait_for_completion: true + + - match: { snapshot.snapshot: snapshot-two } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.shards.failed : 0 } + + - do: + snapshot.get: + repository: repository + snapshot: snapshot-one,snapshot-two + + - is_true: snapshots + - match: { snapshots.0.state : SUCCESS } + - match: { snapshots.1.state : SUCCESS } + + # Delete the index + - do: + indices.delete: + index: docs + + # Restore the second snapshot + - do: + snapshot.restore: + repository: repository + snapshot: snapshot-two + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 7} + + # Delete the index again + - do: + indices.delete: + index: docs + + # Restore the first snapshot + - do: + snapshot.restore: + repository: repository + snapshot: snapshot-one + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 3} + + # Remove the snapshots + - do: + snapshot.delete: + repository: repository + snapshot: snapshot-two + + - do: + snapshot.delete: + repository: repository + snapshot: snapshot-one + + # Remove our repository + - do: + snapshot.delete_repository: + repository: repository diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java index 7b985ebd176d6..27736e24dbf51 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java @@ -24,12 +24,13 @@ import org.elasticsearch.repositories.ESBlobStoreContainerTestCase; import java.util.Locale; +import java.util.concurrent.ConcurrentHashMap; public class GoogleCloudStorageBlobStoreContainerTests extends ESBlobStoreContainerTestCase { @Override protected BlobStore newBlobStore() { String bucket = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); - return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucket, MockStorage.newStorageClient(bucket, getTestName())); + return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucket, new MockStorage(bucket, new ConcurrentHashMap<>())); } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index 37470b0b5afc8..f52dc492f6f44 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -28,14 +28,13 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; -import org.junit.BeforeClass; +import org.junit.AfterClass; -import java.net.SocketPermission; -import java.security.AccessController; import java.util.Collection; import java.util.Collections; import java.util.Map; -import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ 
-43,9 +42,9 @@ public class GoogleCloudStorageBlobStoreRepositoryTests extends ESBlobStoreRepos private static final String BUCKET = "gcs-repository-test"; - // Static storage client shared among all nodes in order to act like a remote repository service: + // Static list of blobs shared among all nodes in order to act like a remote repository service: // all nodes must see the same content - private static final AtomicReference storage = new AtomicReference<>(); + private static final ConcurrentMap blobs = new ConcurrentHashMap<>(); @Override protected Collection> nodePlugins() { @@ -63,15 +62,17 @@ protected void createTestRepository(String name) { .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); } - @BeforeClass - public static void setUpStorage() { - storage.set(MockStorage.newStorageClient(BUCKET, GoogleCloudStorageBlobStoreRepositoryTests.class.getName())); + @AfterClass + public static void wipeRepository() { + blobs.clear(); } public static class MockGoogleCloudStoragePlugin extends GoogleCloudStoragePlugin { + public MockGoogleCloudStoragePlugin(final Settings settings) { super(settings); } + @Override protected GoogleCloudStorageService createStorageService(Environment environment) { return new MockGoogleCloudStorageService(environment, getClientsSettings()); @@ -89,9 +90,7 @@ public Storage createClient(final String clientName, final String application, final TimeValue connectTimeout, final TimeValue readTimeout) { - // The actual impl might open a connection. So check we have permission when this call is made. - AccessController.checkPermission(new SocketPermission("*", "connect")); - return storage.get(); + return new MockStorage(BUCKET, blobs); } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java index 00c0538d198bd..5e25307805235 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java @@ -24,12 +24,13 @@ import org.elasticsearch.repositories.ESBlobStoreTestCase; import java.util.Locale; +import java.util.concurrent.ConcurrentHashMap; public class GoogleCloudStorageBlobStoreTests extends ESBlobStoreTestCase { @Override protected BlobStore newBlobStore() { String bucket = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); - return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucket, MockStorage.newStorageClient(bucket, getTestName())); + return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucket, new MockStorage(bucket, new ConcurrentHashMap<>())); } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryDeprecationTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryDeprecationTests.java index 473424986a0e5..26324d614d4aa 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryDeprecationTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryDeprecationTests.java @@ -28,6 +28,8 @@ import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.test.ESTestCase; +import java.util.concurrent.ConcurrentHashMap; + public class 
GoogleCloudStorageRepositoryDeprecationTests extends ESTestCase { public void testDeprecatedSettings() throws Exception { @@ -45,7 +47,7 @@ public void testDeprecatedSettings() throws Exception { new GoogleCloudStorageService(environment, GoogleCloudStorageClientSettings.load(Settings.EMPTY)) { @Override public Storage createClient(String clientName, String application, TimeValue connect, TimeValue read) throws Exception { - return MockStorage.newStorageClient("test", "deprecated"); + return new MockStorage("test", new ConcurrentHashMap<>()); } }); diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java index a04dae294975a..325cea132beb6 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java @@ -19,74 +19,289 @@ package org.elasticsearch.repositories.gcs; +import com.google.api.client.googleapis.json.GoogleJsonError; +import com.google.api.client.googleapis.json.GoogleJsonResponseException; +import com.google.api.client.http.AbstractInputStreamContent; +import com.google.api.client.http.HttpHeaders; +import com.google.api.client.http.HttpMethods; +import com.google.api.client.http.HttpRequest; +import com.google.api.client.http.HttpRequestInitializer; +import com.google.api.client.http.HttpResponseException; import com.google.api.client.http.LowLevelHttpRequest; import com.google.api.client.http.LowLevelHttpResponse; -import com.google.api.client.json.jackson2.JacksonFactory; +import com.google.api.client.http.MultipartContent; +import com.google.api.client.json.JsonFactory; +import com.google.api.client.testing.http.MockHttpTransport; import com.google.api.client.testing.http.MockLowLevelHttpRequest; import com.google.api.client.testing.http.MockLowLevelHttpResponse; import com.google.api.services.storage.Storage; +import com.google.api.services.storage.model.Bucket; +import com.google.api.services.storage.model.StorageObject; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.rest.RestStatus; +import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; -import java.util.Map; +import java.io.InputStream; +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.concurrent.ConcurrentMap; + +import static org.mockito.Mockito.mock; /** - * {@link MockStorage} is a utility class that provides {@link Storage} clients that works - * against an embedded {@link GoogleCloudStorageTestServer}. + * {@link MockStorage} mocks a {@link Storage} client by storing all the blobs + * in a given concurrent map. 
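+ * <p>(Descriptive note: map keys are blob names and values are blob contents, so a single
+ * map instance can be shared by several nodes to emulate one common remote storage service,
+ * as the repository tests in this change rely on.)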
*/ -class MockStorage extends com.google.api.client.testing.http.MockHttpTransport { +class MockStorage extends Storage { - /** - * Embedded test server that emulates a Google Cloud Storage service - **/ - private final GoogleCloudStorageTestServer server = new GoogleCloudStorageTestServer(); + /* A custom HTTP header name used to propagate the name of the blobs to delete in batch requests */ + private static final String DELETION_HEADER = "x-blob-to-delete"; - private MockStorage() { + private final String bucketName; + private final ConcurrentMap blobs; + + MockStorage(final String bucket, final ConcurrentMap blobs) { + super(new MockedHttpTransport(blobs), mock(JsonFactory.class), mock(HttpRequestInitializer.class)); + this.bucketName = bucket; + this.blobs = blobs; } @Override - public LowLevelHttpRequest buildRequest(String method, String url) throws IOException { - return new MockLowLevelHttpRequest() { - @Override - public LowLevelHttpResponse execute() throws IOException { - return convert(server.handle(method, url, getHeaders(), getContentAsBytes())); - } + public Buckets buckets() { + return new MockBuckets(); + } - /** Returns the LowLevelHttpRequest body as an array of bytes **/ - byte[] getContentAsBytes() throws IOException { - ByteArrayOutputStream out = new ByteArrayOutputStream(); - if (getStreamingContent() != null) { - getStreamingContent().writeTo(out); + @Override + public Objects objects() { + return new MockObjects(); + } + + class MockBuckets extends Buckets { + + @Override + public Get get(String getBucket) { + return new Get(getBucket) { + @Override + public Bucket execute() { + if (bucketName.equals(getBucket())) { + Bucket bucket = new Bucket(); + bucket.setId(bucketName); + return bucket; + } else { + return null; + } } - return out.toByteArray(); - } - }; + }; + } } - private static MockLowLevelHttpResponse convert(final GoogleCloudStorageTestServer.Response response) { - final MockLowLevelHttpResponse lowLevelHttpResponse = new MockLowLevelHttpResponse(); - for (Map.Entry header : response.headers.entrySet()) { - lowLevelHttpResponse.addHeader(header.getKey(), header.getValue()); + class MockObjects extends Objects { + + @Override + public Get get(String getBucket, String getObject) { + return new Get(getBucket, getObject) { + @Override + public StorageObject execute() throws IOException { + if (bucketName.equals(getBucket()) == false) { + throw newBucketNotFoundException(getBucket()); + } + if (blobs.containsKey(getObject()) == false) { + throw newObjectNotFoundException(getObject()); + } + + StorageObject storageObject = new StorageObject(); + storageObject.setId(getObject()); + return storageObject; + } + + @Override + public InputStream executeMediaAsInputStream() throws IOException { + if (bucketName.equals(getBucket()) == false) { + throw newBucketNotFoundException(getBucket()); + } + if (blobs.containsKey(getObject()) == false) { + throw newObjectNotFoundException(getObject()); + } + return new ByteArrayInputStream(blobs.get(getObject())); + } + }; + } + + @Override + public Insert insert(String insertBucket, StorageObject insertObject, AbstractInputStreamContent insertStream) { + return new Insert(insertBucket, insertObject) { + @Override + public StorageObject execute() throws IOException { + if (bucketName.equals(getBucket()) == false) { + throw newBucketNotFoundException(getBucket()); + } + + ByteArrayOutputStream out = new ByteArrayOutputStream(); + Streams.copy(insertStream.getInputStream(), out); + blobs.put(getName(), out.toByteArray()); + 
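// (note) a real client would return the created StorageObject here; the mock
+                    // returns null, a value the callers in these tests do not read
+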
return null; + } + }; + } + + @Override + public List list(String listBucket) { + return new List(listBucket) { + @Override + public com.google.api.services.storage.model.Objects execute() throws IOException { + if (bucketName.equals(getBucket()) == false) { + throw newBucketNotFoundException(getBucket()); + } + + final com.google.api.services.storage.model.Objects objects = new com.google.api.services.storage.model.Objects(); + + final java.util.List storageObjects = new ArrayList<>(); + for (Entry blob : blobs.entrySet()) { + if (getPrefix() == null || blob.getKey().startsWith(getPrefix())) { + StorageObject storageObject = new StorageObject(); + storageObject.setId(blob.getKey()); + storageObject.setName(blob.getKey()); + storageObject.setSize(BigInteger.valueOf((long) blob.getValue().length)); + storageObjects.add(storageObject); + } + } + + objects.setItems(storageObjects); + return objects; + } + }; } - lowLevelHttpResponse.setContentType(response.contentType); - lowLevelHttpResponse.setStatusCode(response.status.getStatus()); - lowLevelHttpResponse.setReasonPhrase(response.status.toString()); - if (response.body != null) { - lowLevelHttpResponse.setContent(response.body); - lowLevelHttpResponse.setContentLength(response.body.length); + + @Override + public Delete delete(String deleteBucket, String deleteObject) { + return new Delete(deleteBucket, deleteObject) { + @Override + public Void execute() throws IOException { + if (bucketName.equals(getBucket()) == false) { + throw newBucketNotFoundException(getBucket()); + } + + if (blobs.containsKey(getObject()) == false) { + throw newObjectNotFoundException(getObject()); + } + + blobs.remove(getObject()); + return null; + } + + @Override + public HttpRequest buildHttpRequest() throws IOException { + HttpRequest httpRequest = super.buildHttpRequest(); + httpRequest.getHeaders().put(DELETION_HEADER, getObject()); + return httpRequest; + } + }; + } + + @Override + public Copy copy(String srcBucket, String srcObject, String destBucket, String destObject, StorageObject content) { + return new Copy(srcBucket, srcObject, destBucket, destObject, content) { + @Override + public StorageObject execute() throws IOException { + if (bucketName.equals(getSourceBucket()) == false) { + throw newBucketNotFoundException(getSourceBucket()); + } + if (bucketName.equals(getDestinationBucket()) == false) { + throw newBucketNotFoundException(getDestinationBucket()); + } + + final byte[] bytes = blobs.get(getSourceObject()); + if (bytes == null) { + throw newObjectNotFoundException(getSourceObject()); + } + blobs.put(getDestinationObject(), bytes); + + StorageObject storageObject = new StorageObject(); + storageObject.setId(getDestinationObject()); + return storageObject; + } + }; } - return lowLevelHttpResponse; + } + + private static GoogleJsonResponseException newBucketNotFoundException(final String bucket) { + HttpResponseException.Builder builder = new HttpResponseException.Builder(404, "Bucket not found: " + bucket, new HttpHeaders()); + return new GoogleJsonResponseException(builder, new GoogleJsonError()); + } + + private static GoogleJsonResponseException newObjectNotFoundException(final String object) { + HttpResponseException.Builder builder = new HttpResponseException.Builder(404, "Object not found: " + object, new HttpHeaders()); + return new GoogleJsonResponseException(builder, new GoogleJsonError()); } /** - * Instanciates a mocked Storage client for tests. 
+ * {@link MockedHttpTransport} extends the existing testing transport to analyze the content + * of {@link com.google.api.client.googleapis.batch.BatchRequest} and delete the appropriates + * blobs. We use this because {@link Storage#batch()} is final and there is no other way to + * extend batch requests for testing purposes. */ - public static Storage newStorageClient(final String bucket, final String applicationName) { - MockStorage mockStorage = new MockStorage(); - mockStorage.server.createBucket(bucket); + static class MockedHttpTransport extends MockHttpTransport { + + private final ConcurrentMap blobs; + + MockedHttpTransport(final ConcurrentMap blobs) { + this.blobs = blobs; + } - return new Storage.Builder(mockStorage, JacksonFactory.getDefaultInstance(), null) - .setApplicationName(applicationName) - .build(); + @Override + public LowLevelHttpRequest buildRequest(final String method, final String url) throws IOException { + // We analyze the content of the Batch request to detect our custom HTTP header, + // and extract from it the name of the blob to delete. Then we reply a simple + // batch response so that the client parser is happy. + // + // See https://cloud.google.com/storage/docs/json_api/v1/how-tos/batch for the + // format of the batch request body. + if (HttpMethods.POST.equals(method) && url.endsWith("/batch")) { + return new MockLowLevelHttpRequest() { + @Override + public LowLevelHttpResponse execute() throws IOException { + final String contentType = new MultipartContent().getType(); + + final StringBuilder builder = new StringBuilder(); + try (ByteArrayOutputStream out = new ByteArrayOutputStream()) { + getStreamingContent().writeTo(out); + + Streams.readAllLines(new ByteArrayInputStream(out.toByteArray()), line -> { + if (line != null && line.startsWith(DELETION_HEADER)) { + builder.append("--__END_OF_PART__\r\n"); + builder.append("Content-Type: application/http").append("\r\n"); + builder.append("\r\n"); + builder.append("HTTP/1.1 "); + + final String blobName = line.substring(line.indexOf(':') + 1).trim(); + if (blobs.containsKey(blobName)) { + builder.append(RestStatus.OK.getStatus()); + blobs.remove(blobName); + } else { + builder.append(RestStatus.NOT_FOUND.getStatus()); + } + builder.append("\r\n"); + builder.append("Content-Type: application/json; charset=UTF-8").append("\r\n"); + builder.append("Content-Length: 0").append("\r\n"); + builder.append("\r\n"); + } + }); + builder.append("\r\n"); + builder.append("--__END_OF_PART__--"); + } + + MockLowLevelHttpResponse response = new MockLowLevelHttpResponse(); + response.setStatusCode(200); + response.setContent(builder.toString()); + response.setContentType(contentType); + return response; + } + }; + } else { + return super.buildRequest(method, url); + } + } } } diff --git a/plugins/repository-gcs/src/test/resources/rest-api-spec/test/repository_gcs/10_basic.yml b/plugins/repository-gcs/src/test/resources/rest-api-spec/test/repository_gcs/10_basic.yml index 62387227cbc9d..f4259771644b2 100644 --- a/plugins/repository-gcs/src/test/resources/rest-api-spec/test/repository_gcs/10_basic.yml +++ b/plugins/repository-gcs/src/test/resources/rest-api-spec/test/repository_gcs/10_basic.yml @@ -11,176 +11,3 @@ nodes.info: {} - match: { nodes.$master.plugins.0.name: repository-gcs } ---- -"Snapshot/Restore with repository-gcs": - - skip: - version: " - 6.3.0" - reason: repository-gcs was not testable through YAML tests until 6.3.0 - - # Register repository - - do: - snapshot.create_repository: - repository: repository 
- body: - type: gcs - settings: - bucket: "bucket_test" - client: "integration_test" - - - match: { acknowledged: true } - - # Index documents - - do: - bulk: - refresh: true - body: - - index: - _index: docs - _type: doc - _id: 1 - - snapshot: one - - index: - _index: docs - _type: doc - _id: 2 - - snapshot: one - - index: - _index: docs - _type: doc - _id: 3 - - snapshot: one - - - do: - count: - index: docs - - - match: {count: 3} - - # Create a first snapshot - - do: - snapshot.create: - repository: repository - snapshot: snapshot-one - wait_for_completion: true - - - match: { snapshot.snapshot: snapshot-one } - - match: { snapshot.state : SUCCESS } - - match: { snapshot.include_global_state: true } - - match: { snapshot.shards.failed : 0 } - - - do: - snapshot.status: - repository: repository - snapshot: snapshot-one - - - is_true: snapshots - - match: { snapshots.0.snapshot: snapshot-one } - - match: { snapshots.0.state : SUCCESS } - - # Index more documents - - do: - bulk: - refresh: true - body: - - index: - _index: docs - _type: doc - _id: 4 - - snapshot: two - - index: - _index: docs - _type: doc - _id: 5 - - snapshot: two - - index: - _index: docs - _type: doc - _id: 6 - - snapshot: two - - index: - _index: docs - _type: doc - _id: 7 - - snapshot: two - - - do: - count: - index: docs - - - match: {count: 7} - - # Create a second snapshot - - do: - snapshot.create: - repository: repository - snapshot: snapshot-two - wait_for_completion: true - - - match: { snapshot.snapshot: snapshot-two } - - match: { snapshot.state : SUCCESS } - - match: { snapshot.shards.failed : 0 } - - - do: - snapshot.get: - repository: repository - snapshot: snapshot-one,snapshot-two - - - is_true: snapshots - - match: { snapshots.0.state : SUCCESS } - - match: { snapshots.1.state : SUCCESS } - - # Delete the index - - do: - indices.delete: - index: docs - - # Restore the second snapshot - - do: - snapshot.restore: - repository: repository - snapshot: snapshot-two - wait_for_completion: true - - - do: - count: - index: docs - - - match: {count: 7} - - # Delete the index again - - do: - indices.delete: - index: docs - - # Restore the first snapshot - - do: - snapshot.restore: - repository: repository - snapshot: snapshot-one - wait_for_completion: true - - - do: - count: - index: docs - - - match: {count: 3} - - # Remove the snapshots - - do: - snapshot.delete: - repository: repository - snapshot: snapshot-two - - - do: - snapshot.delete: - repository: repository - snapshot: snapshot-one - - # Remove our repository - - do: - snapshot.delete_repository: - repository: repository - - - - diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index ae971cfe4e1ec..23252881cd75f 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -64,9 +64,14 @@ test { exclude '**/*CredentialsTests.class' } +check { + // also execute the QA tests when testing the plugin + dependsOn 'qa:amazon-s3:check' +} + integTestCluster { - keystoreSetting 's3.client.default.access_key', 'myaccesskey' - keystoreSetting 's3.client.default.secret_key', 'mysecretkey' + keystoreSetting 's3.client.integration_test.access_key', "s3_integration_test_access_key" + keystoreSetting 's3.client.integration_test.secret_key', "s3_integration_test_secret_key" } thirdPartyAudit.excludes = [ diff --git a/plugins/repository-s3/qa/amazon-s3/build.gradle b/plugins/repository-s3/qa/amazon-s3/build.gradle new file mode 100644 index 0000000000000..5e288899021a1 --- /dev/null +++ 
b/plugins/repository-s3/qa/amazon-s3/build.gradle @@ -0,0 +1,83 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import org.elasticsearch.gradle.MavenFilteringHack +import org.elasticsearch.gradle.test.AntFixture + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: ':plugins:repository-s3', configuration: 'runtime') +} + +integTestCluster { + plugin ':plugins:repository-s3' +} + +forbiddenApisTest { + // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage + bundledSignatures -= 'jdk-non-portable' + bundledSignatures += 'jdk-internal' +} + +boolean useFixture = false + +String s3AccessKey = System.getenv("amazon_s3_access_key") +String s3SecretKey = System.getenv("amazon_s3_secret_key") +String s3Bucket = System.getenv("amazon_s3_bucket") +String s3BasePath = System.getenv("amazon_s3_base_path") + +if (!s3AccessKey && !s3SecretKey && !s3Bucket && !s3BasePath) { + s3AccessKey = 's3_integration_test_access_key' + s3SecretKey = 's3_integration_test_secret_key' + s3Bucket = 'bucket_test' + s3BasePath = 'integration_test' + useFixture = true +} + +/** A task to start the AmazonS3Fixture which emulates a S3 service **/ +task s3Fixture(type: AntFixture) { + dependsOn compileTestJava + env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }" + executable = new File(project.runtimeJavaHome, 'bin/java') + args 'org.elasticsearch.repositories.s3.AmazonS3Fixture', baseDir, s3Bucket +} + +Map expansions = [ + 'bucket': s3Bucket, + 'base_path': s3BasePath +] +processTestResources { + inputs.properties(expansions) + MavenFilteringHack.filter(it, expansions) +} + +integTestCluster { + keystoreSetting 's3.client.integration_test.access_key', s3AccessKey + keystoreSetting 's3.client.integration_test.secret_key', s3SecretKey + + if (useFixture) { + dependsOn s3Fixture + /* Use a closure on the string to delay evaluation until tests are executed */ + setting 's3.client.integration_test.endpoint', "http://${-> s3Fixture.addressAndPort}" + } else { + println "Using an external service to test the repository-s3 plugin" + } +} \ No newline at end of file diff --git a/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java new file mode 100644 index 0000000000000..c8321e83d1390 --- /dev/null +++ b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java @@ -0,0 +1,137 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.s3; + +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.mocksocket.MockHttpServer; +import org.elasticsearch.repositories.s3.AmazonS3TestServer.Response; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.net.Inet6Address; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardCopyOption; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.singleton; +import static java.util.Collections.singletonList; + +/** + * {@link AmazonS3Fixture} is a fixture that emulates a S3 service. + *

+ * It starts an asynchronous socket server that binds to a random local port. The server parses + * HTTP requests and uses a {@link AmazonS3TestServer} to handle them before returning + * them to the client as HTTP responses. + */ +public class AmazonS3Fixture { + + public static void main(String[] args) throws Exception { + if (args == null || args.length != 2) { + throw new IllegalArgumentException("AmazonS3Fixture "); + } + + final InetSocketAddress socketAddress = new InetSocketAddress(InetAddress.getLoopbackAddress(), 0); + final HttpServer httpServer = MockHttpServer.createHttp(socketAddress, 0); + + try { + final Path workingDirectory = workingDir(args[0]); + /// Writes the PID of the current Java process in a `pid` file located in the working directory + writeFile(workingDirectory, "pid", ManagementFactory.getRuntimeMXBean().getName().split("@")[0]); + + final String addressAndPort = addressToString(httpServer.getAddress()); + // Writes the address and port of the http server in a `ports` file located in the working directory + writeFile(workingDirectory, "ports", addressAndPort); + + // Emulates S3 + final String storageUrl = "http://" + addressAndPort; + final AmazonS3TestServer storageTestServer = new AmazonS3TestServer(storageUrl); + storageTestServer.createBucket(args[1]); + + httpServer.createContext("/", new ResponseHandler(storageTestServer)); + httpServer.start(); + + // Wait to be killed + Thread.sleep(Long.MAX_VALUE); + + } finally { + httpServer.stop(0); + } + } + + @SuppressForbidden(reason = "Paths#get is fine - we don't have environment here") + private static Path workingDir(final String dir) { + return Paths.get(dir); + } + + private static void writeFile(final Path dir, final String fileName, final String content) throws IOException { + final Path tempPidFile = Files.createTempFile(dir, null, null); + Files.write(tempPidFile, singleton(content)); + Files.move(tempPidFile, dir.resolve(fileName), StandardCopyOption.ATOMIC_MOVE); + } + + private static String addressToString(final SocketAddress address) { + final InetSocketAddress inetSocketAddress = (InetSocketAddress) address; + if (inetSocketAddress.getAddress() instanceof Inet6Address) { + return "[" + inetSocketAddress.getHostString() + "]:" + inetSocketAddress.getPort(); + } else { + return inetSocketAddress.getHostString() + ":" + inetSocketAddress.getPort(); + } + } + + static class ResponseHandler implements HttpHandler { + + private final AmazonS3TestServer storageServer; + + private ResponseHandler(final AmazonS3TestServer storageServer) { + this.storageServer = storageServer; + } + + @Override + public void handle(HttpExchange exchange) throws IOException { + String method = exchange.getRequestMethod(); + String path = storageServer.getEndpoint() + exchange.getRequestURI().getRawPath(); + String query = exchange.getRequestURI().getRawQuery(); + Map> headers = exchange.getRequestHeaders(); + ByteArrayOutputStream out = new ByteArrayOutputStream(); + Streams.copy(exchange.getRequestBody(), out); + + final Response storageResponse = storageServer.handle(method, path, query, headers, out.toByteArray()); + + Map> responseHeaders = exchange.getResponseHeaders(); + responseHeaders.put("Content-Type", singletonList(storageResponse.contentType)); + storageResponse.headers.forEach((k, v) -> responseHeaders.put(k, singletonList(v))); + exchange.sendResponseHeaders(storageResponse.status.getStatus(), storageResponse.body.length); + if (storageResponse.body.length > 0) { + 
exchange.getResponseBody().write(storageResponse.body); + } + exchange.close(); + } + } +} diff --git a/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3RepositoryClientYamlTestSuiteIT.java b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3RepositoryClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..afcc0fa353482 --- /dev/null +++ b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3RepositoryClientYamlTestSuiteIT.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.s3; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; + +public class AmazonS3RepositoryClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + + public AmazonS3RepositoryClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable<Object[]> parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } +} diff --git a/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java new file mode 100644 index 0000000000000..a3ea287b7f829 --- /dev/null +++ b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java @@ -0,0 +1,542 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.repositories.s3; + +import com.amazonaws.util.DateUtils; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.path.PathTrie; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.RestUtils; + +import java.io.BufferedInputStream; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicLong; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; + +/** + * {@link AmazonS3TestServer} emulates an S3 service through a {@link #handle(String, String, String, Map, byte[])} + * method that provides appropriate responses for specific requests like the real S3 platform would do. + * It is largely based on official documentation available at https://docs.aws.amazon.com/AmazonS3/latest/API/. + */ +public class AmazonS3TestServer { + + private static final byte[] EMPTY_BYTE = new byte[0]; + /** List of the buckets stored on this test server **/ + private final Map<String, Bucket> buckets = ConcurrentCollections.newConcurrentMap(); + + /** Request handlers for the requests made by the S3 client **/ + private final PathTrie<RequestHandler> handlers; + + /** Server endpoint **/ + private final String endpoint; + + /** Counter for the request ids **/ + private final AtomicLong requests = new AtomicLong(0); + + /** + * Creates an {@link AmazonS3TestServer} with a custom endpoint + */ + AmazonS3TestServer(final String endpoint) { + this.endpoint = Objects.requireNonNull(endpoint, "endpoint must not be null"); + this.handlers = defaultHandlers(endpoint, buckets); + } + + /** Creates a bucket in the test server **/ + void createBucket(final String bucketName) { + buckets.put(bucketName, new Bucket(bucketName)); + } + + public String getEndpoint() { + return endpoint; + } + + /** + * Returns a response for the given request + * + * @param method the HTTP method of the request + * @param path the path of the URL of the request + * @param query the query string of the URL of the request + * @param headers the HTTP headers of the request + * @param body the HTTP request body + * @return a {@link Response} + * @throws IOException if something goes wrong + */ + public Response handle(final String method, + final String path, + final String query, + final Map<String, List<String>> headers, + byte[] body) throws IOException { + + final long requestId = requests.incrementAndGet(); + + final Map<String, String> params = new HashMap<>(); + if (query != null) { + RestUtils.decodeQueryString(query, 0, params); + } + + final List<String> authorizations = headers.get("Authorization"); + if (authorizations == null + || (authorizations.isEmpty() == false && authorizations.get(0).contains("s3_integration_test_access_key") == false)) { + return newError(requestId, RestStatus.FORBIDDEN, "AccessDenied", "Access Denied", ""); + } + + final RequestHandler handler = handlers.retrieve(method + " " + path, params); + if (handler != null) { + return handler.execute(params, headers, body, requestId); + } else { + return newInternalError(requestId, "No handler defined for request [method: " + method + ", path: "
+ path + "]"); + } + } + + @FunctionalInterface + interface RequestHandler { + + /** + * Simulates the execution of a S3 request and returns a corresponding response. + * + * @param params the request's query string parameters + * @param headers the request's headers + * @param body the request body provided as a byte array + * @param requestId a unique id for the incoming request + * @return the corresponding response + * + * @throws IOException if something goes wrong + */ + Response execute(Map params, Map> headers, byte[] body, long requestId) throws IOException; + } + + /** Builds the default request handlers **/ + private static PathTrie defaultHandlers(final String endpoint, final Map buckets) { + final PathTrie handlers = new PathTrie<>(RestUtils.REST_DECODER); + + // HEAD Object + // + // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html + objectsPaths("HEAD " + endpoint + "/{bucket}").forEach(path -> + handlers.insert(path, (params, headers, body, id) -> { + final String bucketName = params.get("bucket"); + + final Bucket bucket = buckets.get(bucketName); + if (bucket == null) { + return newBucketNotFoundError(id, bucketName); + } + + final String objectName = objectName(params); + for (Map.Entry object : bucket.objects.entrySet()) { + if (object.getKey().equals(objectName)) { + return new Response(RestStatus.OK, emptyMap(), "text/plain", EMPTY_BYTE); + } + } + return newObjectNotFoundError(id, objectName); + }) + ); + + // PUT Object & PUT Object Copy + // + // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html + // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html + objectsPaths("PUT " + endpoint + "/{bucket}").forEach(path -> + handlers.insert(path, (params, headers, body, id) -> { + final String destBucketName = params.get("bucket"); + + final Bucket destBucket = buckets.get(destBucketName); + if (destBucket == null) { + return newBucketNotFoundError(id, destBucketName); + } + + final String destObjectName = objectName(params); + + // Request is a copy request + List headerCopySource = headers.getOrDefault("x-amz-copy-source", emptyList()); + if (headerCopySource.isEmpty() == false) { + String srcObjectName = headerCopySource.get(0); + + Bucket srcBucket = null; + for (Bucket bucket : buckets.values()) { + String prefix = "/" + bucket.name + "/"; + if (srcObjectName.startsWith(prefix)) { + srcObjectName = srcObjectName.replaceFirst(prefix, ""); + srcBucket = bucket; + break; + } + } + + if (srcBucket == null || srcBucket.objects.containsKey(srcObjectName) == false) { + return newObjectNotFoundError(id, srcObjectName); + } + + byte[] bytes = srcBucket.objects.get(srcObjectName); + if (bytes != null) { + destBucket.objects.put(destObjectName, bytes); + return newCopyResultResponse(id); + } else { + return newObjectNotFoundError(id, srcObjectName); + } + } else { + // This is a chunked upload request. We should have the header "Content-Encoding : aws-chunked,gzip" + // to detect it but it seems that the AWS SDK does not follow the S3 guidelines here. + // + // See https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html + // + List headerDecodedContentLength = headers.getOrDefault("X-amz-decoded-content-length", emptyList()); + if (headerDecodedContentLength.size() == 1) { + int contentLength = Integer.valueOf(headerDecodedContentLength.get(0)); + + // Chunked requests have a payload like this: + // + // 105;chunk-signature=01d0de6be013115a7f4794db8c4b9414e6ec71262cc33ae562a71f2eaed1efe8 + // ... bytes of data .... 
+ // 0;chunk-signature=f890420b1974c5469aaf2112e9e6f2e0334929fd45909e03c0eff7a84124f6a4 + // + try (BufferedInputStream inputStream = new BufferedInputStream(new ByteArrayInputStream(body))) { + int b; + // Moves to the end of the first signature line + while ((b = inputStream.read()) != -1) { + if (b == '\n') { + break; + } + } + + final byte[] bytes = new byte[contentLength]; + inputStream.read(bytes, 0, contentLength); + + destBucket.objects.put(destObjectName, bytes); + return new Response(RestStatus.OK, emptyMap(), "text/plain", EMPTY_BYTE); + } + } + } + return newInternalError(id, "Something is wrong with this PUT request"); + }) + ); + + // DELETE Object + // + // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html + objectsPaths("DELETE " + endpoint + "/{bucket}").forEach(path -> + handlers.insert(path, (params, headers, body, id) -> { + final String bucketName = params.get("bucket"); + + final Bucket bucket = buckets.get(bucketName); + if (bucket == null) { + return newBucketNotFoundError(id, bucketName); + } + + final String objectName = objectName(params); + if (bucket.objects.remove(objectName) != null) { + return new Response(RestStatus.OK, emptyMap(), "text/plain", EMPTY_BYTE); + } + return newObjectNotFoundError(id, objectName); + }) + ); + + // GET Object + // + // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html + objectsPaths("GET " + endpoint + "/{bucket}").forEach(path -> + handlers.insert(path, (params, headers, body, id) -> { + final String bucketName = params.get("bucket"); + + final Bucket bucket = buckets.get(bucketName); + if (bucket == null) { + return newBucketNotFoundError(id, bucketName); + } + + final String objectName = objectName(params); + if (bucket.objects.containsKey(objectName)) { + return new Response(RestStatus.OK, emptyMap(), "application/octet-stream", bucket.objects.get(objectName)); + + } + return newObjectNotFoundError(id, objectName); + }) + ); + + // HEAD Bucket + // + // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketHEAD.html + handlers.insert("HEAD " + endpoint + "/{bucket}", (params, headers, body, id) -> { + String bucket = params.get("bucket"); + if (Strings.hasText(bucket) && buckets.containsKey(bucket)) { + return new Response(RestStatus.OK, emptyMap(), "text/plain", EMPTY_BYTE); + } else { + return newBucketNotFoundError(id, bucket); + } + }); + + // GET Bucket (List Objects) Version 1 + // + // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html + handlers.insert("GET " + endpoint + "/{bucket}/", (params, headers, body, id) -> { + final String bucketName = params.get("bucket"); + + final Bucket bucket = buckets.get(bucketName); + if (bucket == null) { + return newBucketNotFoundError(id, bucketName); + } + + String prefix = params.get("prefix"); + if (prefix == null) { + List<String> prefixes = headers.get("Prefix"); + if (prefixes != null && prefixes.size() == 1) { + prefix = prefixes.get(0); + } + } + return newListBucketResultResponse(id, bucket, prefix); + }); + + // Delete Multiple Objects + // + // https://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html + handlers.insert("POST " + endpoint + "/", (params, headers, body, id) -> { + final List<String> deletes = new ArrayList<>(); + final List<String> errors = new ArrayList<>(); + + if (params.containsKey("delete")) { + // The request body is something like: + // <Delete><Object><Key>...</Key></Object><Object><Key>...</Key></Object></Delete>
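+ // Note: rather than using a real XML parser, the code below simply scans the body for the <Key>...</Key> markers, which is good enough for the object names generated by these tests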
+ String request = Streams.copyToString(new InputStreamReader(new ByteArrayInputStream(body), StandardCharsets.UTF_8)); + if (request.startsWith("<?xml")) { + final String startMarker = "<Key>"; + final String endMarker = "</Key>"; + + int offset = 0; + while (offset != -1) { + offset = request.indexOf(startMarker, offset); + if (offset > 0) { + int closingOffset = request.indexOf(endMarker, offset); + if (closingOffset != -1) { + offset = offset + startMarker.length(); + final String objectName = request.substring(offset, closingOffset); + + boolean found = false; + for (Bucket bucket : buckets.values()) { + if (bucket.objects.remove(objectName) != null) { + found = true; + } + } + + if (found) { + deletes.add(objectName); + } else { + errors.add(objectName); + } + } + } + } + return newDeleteResultResponse(id, deletes, errors); + } + } + return newInternalError(id, "Something is wrong with this POST multiple deletes request"); + }); + + return handlers; + } + + /** + * Represents an S3 bucket. + */ + static class Bucket { + + /** Bucket name **/ + final String name; + + /** Blobs contained in the bucket **/ + final Map<String, byte[]> objects; + + Bucket(final String name) { + this.name = Objects.requireNonNull(name); + this.objects = ConcurrentCollections.newConcurrentMap(); + } + } + + /** + * Represents an HTTP response. + */ + static class Response { + + final RestStatus status; + final Map<String, String> headers; + final String contentType; + final byte[] body; + + Response(final RestStatus status, final Map<String, String> headers, final String contentType, final byte[] body) { + this.status = Objects.requireNonNull(status); + this.headers = Objects.requireNonNull(headers); + this.contentType = Objects.requireNonNull(contentType); + this.body = Objects.requireNonNull(body); + } + } + + /** + * Expands a path like "http://host:port/{bucket}" into 10 derived paths like: + * - http://host:port/{bucket}/{path0} + * - http://host:port/{bucket}/{path0}/{path1} + * - http://host:port/{bucket}/{path0}/{path1}/{path2} + * - etc + */ + private static List<String> objectsPaths(final String path) { + final List<String> paths = new ArrayList<>(); + String p = path; + for (int i = 0; i < 10; i++) { + p = p + "/{path" + i + "}"; + paths.add(p); + } + return paths; + } + + /** + * Retrieves the object name from all derived paths named {pathX} where 0 <= X < 10.
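+ * For example, the parameters {path0: "foo", path1: "bar"} resolve to the object name "foo/bar".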
+ * + * This is the counterpart of {@link #objectsPaths(String)} + */ + private static String objectName(final Map<String, String> params) { + final StringBuilder name = new StringBuilder(); + for (int i = 0; i < 10; i++) { + String value = params.getOrDefault("path" + i, null); + if (value != null) { + if (name.length() > 0) { + name.append('/'); + } + name.append(value); + } + } + return name.toString(); + } + + /** + * S3 ListBucketResult Response + */ + private static Response newListBucketResultResponse(final long requestId, final Bucket bucket, final String prefix) { + final String id = Long.toString(requestId); + final StringBuilder response = new StringBuilder(); + response.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>"); + response.append("<ListBucketResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">"); + response.append("<Prefix>"); + if (prefix != null) { + response.append(prefix); + } + response.append("</Prefix>"); + response.append("<Marker/>"); + response.append("<MaxKeys>1000</MaxKeys>"); + response.append("<IsTruncated>false</IsTruncated>"); + + int count = 0; + for (Map.Entry<String, byte[]> object : bucket.objects.entrySet()) { + String objectName = object.getKey(); + if (prefix == null || objectName.startsWith(prefix)) { + response.append("<Contents>"); + response.append("<Key>").append(objectName).append("</Key>"); + response.append("<LastModified>").append(DateUtils.formatISO8601Date(new Date())).append("</LastModified>"); + response.append("<ETag>&quot;").append(count++).append("&quot;</ETag>"); + response.append("<Size>").append(object.getValue().length).append("</Size>"); + response.append("</Contents>"); + } + } + response.append("</ListBucketResult>"); + return new Response(RestStatus.OK, singletonMap("x-amz-request-id", id), "application/xml", response.toString().getBytes(UTF_8)); + } + + /** + * S3 Copy Result Response + */ + private static Response newCopyResultResponse(final long requestId) { + final String id = Long.toString(requestId); + final StringBuilder response = new StringBuilder(); + response.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>"); + response.append("<CopyObjectResult>"); + response.append("<LastModified>").append(DateUtils.formatISO8601Date(new Date())).append("</LastModified>"); + response.append("<ETag>").append(requestId).append("</ETag>"); + response.append("</CopyObjectResult>"); + return new Response(RestStatus.OK, singletonMap("x-amz-request-id", id), "application/xml", response.toString().getBytes(UTF_8)); + } + + /** + * S3 DeleteResult Response + */ + private static Response newDeleteResultResponse(final long requestId, + final List<String> deletedObjects, + final List<String> ignoredObjects) { + final String id = Long.toString(requestId); + + final StringBuilder response = new StringBuilder(); + response.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>"); + response.append("<DeleteResult>"); + for (String deletedObject : deletedObjects) { + response.append("<Deleted>"); + response.append("<Key>").append(deletedObject).append("</Key>"); + response.append("</Deleted>"); + } + for (String ignoredObject : ignoredObjects) { + response.append("<Error>"); + response.append("<Key>").append(ignoredObject).append("</Key>"); + response.append("<Code>NoSuchKey</Code>"); + response.append("</Error>"); + } + response.append("</DeleteResult>"); + return new Response(RestStatus.OK, singletonMap("x-amz-request-id", id), "application/xml", response.toString().getBytes(UTF_8)); + } + + private static Response newBucketNotFoundError(final long requestId, final String bucket) { + return newError(requestId, RestStatus.NOT_FOUND, "NoSuchBucket", "The specified bucket does not exist", bucket); + } + + private static Response newObjectNotFoundError(final long requestId, final String object) { + return newError(requestId, RestStatus.NOT_FOUND, "NoSuchKey", "The specified key does not exist", object); + } + + private static Response newInternalError(final long requestId, final String resource) { + return newError(requestId, RestStatus.INTERNAL_SERVER_ERROR, "InternalError", "We encountered an internal error", resource); + } + + /**
+ * S3 Error + * + * https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html + */ + private static Response newError(final long requestId, + final RestStatus status, + final String code, + final String message, + final String resource) { + final String id = Long.toString(requestId); + final StringBuilder response = new StringBuilder(); + response.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>"); + response.append("<Error>"); + response.append("<Code>").append(code).append("</Code>"); + response.append("<Message>").append(message).append("</Message>"); + response.append("<Resource>").append(resource).append("</Resource>"); + response.append("<RequestId>").append(id).append("</RequestId>"); + response.append("</Error>"); + return new Response(status, singletonMap("x-amz-request-id", id), "application/xml", response.toString().getBytes(UTF_8)); + } +} diff --git a/plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/10_repository.yml b/plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/10_repository.yml new file mode 100644 index 0000000000000..8b3daccf0a2d7 --- /dev/null +++ b/plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/10_repository.yml @@ -0,0 +1,183 @@ +# Integration tests for repository-s3 +--- +"Snapshot/Restore with repository-s3": + + # Register repository + - do: + snapshot.create_repository: + repository: repository + body: + type: s3 + settings: + bucket: ${bucket} + client: integration_test + base_path: ${base_path} + canned_acl: private + storage_class: standard + + - match: { acknowledged: true } + + # Get repository + - do: + snapshot.get_repository: + repository: repository + + - match: { repository.settings.bucket : ${bucket} } + - match: { repository.settings.client : "integration_test" } + - match: { repository.settings.base_path : ${base_path} } + - match: { repository.settings.canned_acl : "private" } + - match: { repository.settings.storage_class : "standard" } + - is_false: repository.settings.access_key + - is_false: repository.settings.secret_key + + # Index documents + - do: + bulk: + refresh: true + body: + - index: + _index: docs + _type: doc + _id: 1 + - snapshot: one + - index: + _index: docs + _type: doc + _id: 2 + - snapshot: one + - index: + _index: docs + _type: doc + _id: 3 + - snapshot: one + + - do: + count: + index: docs + + - match: {count: 3} + + # Create a first snapshot + - do: + snapshot.create: + repository: repository + snapshot: snapshot-one + wait_for_completion: true + + - match: { snapshot.snapshot: snapshot-one } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.include_global_state: true } + - match: { snapshot.shards.failed : 0 } + + - do: + snapshot.status: + repository: repository + snapshot: snapshot-one + + - is_true: snapshots + - match: { snapshots.0.snapshot: snapshot-one } + - match: { snapshots.0.state : SUCCESS } + + # Index more documents + - do: + bulk: + refresh: true + body: + - index: + _index: docs + _type: doc + _id: 4 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 5 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 6 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 7 + - snapshot: two + + - do: + count: + index: docs + + - match: {count: 7} + + # Create a second snapshot + - do: + snapshot.create: + repository: repository + snapshot: snapshot-two + wait_for_completion: true + + - match: { snapshot.snapshot: snapshot-two } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.shards.failed : 0 } + + - do: + snapshot.get: + repository: repository + snapshot:
snapshot-one,snapshot-two + + - is_true: snapshots + - match: { snapshots.0.state : SUCCESS } + - match: { snapshots.1.state : SUCCESS } + + # Delete the index + - do: + indices.delete: + index: docs + + # Restore the second snapshot + - do: + snapshot.restore: + repository: repository + snapshot: snapshot-two + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 7} + + # Delete the index again + - do: + indices.delete: + index: docs + + # Restore the first snapshot + - do: + snapshot.restore: + repository: repository + snapshot: snapshot-one + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 3} + + # Remove the snapshots + - do: + snapshot.delete: + repository: repository + snapshot: snapshot-two + + - do: + snapshot.delete: + repository: repository + snapshot: snapshot-one + + # Remove our repository + - do: + snapshot.delete_repository: + repository: repository diff --git a/plugins/repository-s3/qa/build.gradle b/plugins/repository-s3/qa/build.gradle new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 51bb6f2024cd4..e784415b8c999 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -157,7 +157,7 @@ class S3Repository extends BlobStoreRepository { String bucket = BUCKET_SETTING.get(metadata.settings()); if (bucket == null) { - throw new RepositoryException(metadata.name(), "No bucket defined for s3 gateway"); + throw new RepositoryException(metadata.name(), "No bucket defined for s3 repository"); } boolean serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings()); diff --git a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository.yml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository.yml deleted file mode 100644 index 74cab3edcb705..0000000000000 --- a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository.yml +++ /dev/null @@ -1,24 +0,0 @@ -# Integration tests for Repository S3 component -# -"S3 repository can be registered": - - do: - snapshot.create_repository: - repository: test_repo_s3_1 - verify: false - body: - type: s3 - settings: - bucket: "my_bucket_name" - canned_acl: "public-read" - storage_class: "standard" - - # Get repository - - do: - snapshot.get_repository: - repository: test_repo_s3_1 - - - is_true: test_repo_s3_1 - - is_true: test_repo_s3_1.settings.bucket - - is_false: test_repo_s3_1.settings.access_key - - is_false: test_repo_s3_1.settings.secret_key - - match: {test_repo_s3_1.settings.canned_acl : "public-read"} diff --git a/qa/reindex-from-old/build.gradle b/qa/reindex-from-old/build.gradle deleted file mode 100644 index 8da714dd6278a..0000000000000 --- a/qa/reindex-from-old/build.gradle +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -description = """\ -Tests reindex-from-remote against some specific versions of -Elasticsearch prior to 5.0. Versions of Elasticsearch >= 5.0 -should be able to use the standard launching mechanism which -is more flexible and reliable. -""" - - -import org.apache.tools.ant.taskdefs.condition.Os - -import static org.elasticsearch.gradle.BuildPlugin.getJavaHome - -apply plugin: 'elasticsearch.standalone-rest-test' -apply plugin: 'elasticsearch.rest-test' - -integTestCluster { - // Whitelist reindexing from the local node so we can test it. - setting 'reindex.remote.whitelist', '127.0.0.1:*' -} - -configurations { - oldesFixture - es2 - es1 - es090 -} - -dependencies { - oldesFixture project(':test:fixtures:old-elasticsearch') - /* Right now we just test against the latest version of each major we expect - * reindex-from-remote to work against. We could randomize the versions but - * that doesn't seem worth it at this point. */ - es2 'org.elasticsearch.distribution.zip:elasticsearch:2.4.5@zip' - es1 'org.elasticsearch:elasticsearch:1.7.6@zip' - es090 'org.elasticsearch:elasticsearch:0.90.13@zip' -} - -if (Os.isFamily(Os.FAMILY_WINDOWS)) { - // we can't get the pid files in windows so we skip that - integTest.enabled = false -} else { - /* Set up tasks to unzip and run the old versions of ES before running the - * integration tests. */ - for (String version : ['2', '1', '090']) { - Task unzip = task("unzipEs${version}", type: Sync) { - Configuration oldEsDependency = configurations['es' + version] - dependsOn oldEsDependency - /* Use a closure here to delay resolution of the dependency until we need - * it */ - from { - oldEsDependency.collect { zipTree(it) } - } - into temporaryDir - } - Task fixture = task("oldEs${version}Fixture", - type: org.elasticsearch.gradle.test.AntFixture) { - dependsOn project.configurations.oldesFixture - dependsOn unzip - executable = new File(project.runtimeJavaHome, 'bin/java') - env 'CLASSPATH', "${ -> project.configurations.oldesFixture.asPath }" - env 'JAVA_HOME', getJavaHome(it, 7) - args 'oldes.OldElasticsearch', - baseDir, - unzip.temporaryDir, - version == '090' - } - integTest.dependsOn fixture - integTestRunner { - /* Use a closure on the string to delay evaluation until right before we - * run the integration tests so that we can be sure that the file is - * ready. 
*/ - systemProperty "es${version}.port", "${ -> fixture.addressAndPort }" - } - } -} diff --git a/qa/smoke-test-plugins/build.gradle b/qa/smoke-test-plugins/build.gradle index d60216dad194f..602dfa2d6ea4f 100644 --- a/qa/smoke-test-plugins/build.gradle +++ b/qa/smoke-test-plugins/build.gradle @@ -23,9 +23,9 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' ext.pluginsCount = 0 -project.rootProject.subprojects.findAll { it.parent.path == ':plugins' }.each { subproj -> +project(':plugins').getChildProjects().each { pluginName, pluginProject -> integTestCluster { - plugin subproj.path + plugin pluginProject.path } pluginsCount += 1 } diff --git a/qa/vagrant/build.gradle b/qa/vagrant/build.gradle index 4086cf2205785..52a6bb1efb5f5 100644 --- a/qa/vagrant/build.gradle +++ b/qa/vagrant/build.gradle @@ -1,3 +1,5 @@ +import org.elasticsearch.gradle.precommit.PrecommitTasks + /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -17,12 +19,27 @@ * under the License. */ -apply plugin: 'elasticsearch.vagrantsupport' -apply plugin: 'elasticsearch.vagrant' +plugins { + id 'java' + id 'elasticsearch.build' + id 'elasticsearch.vagrantsupport' + id 'elasticsearch.vagrant' +} + +dependencies { + compile "junit:junit:${versions.junit}" + compile "org.hamcrest:hamcrest-core:${versions.hamcrest}" + + // needs to be on the classpath for JarHell + testRuntime project(':libs:elasticsearch-core') + + // pulls in the jar built by this project and its dependencies + packagingTest project(path: project.path, configuration: 'runtime') +} List plugins = [] for (Project subproj : project.rootProject.subprojects) { - if (subproj.path.startsWith(':plugins:') || subproj.path.equals(':example-plugins:custom-settings')) { + if (subproj.parent.path == ':plugins' || subproj.path.equals(':example-plugins:custom-settings')) { // add plugin as a dep dependencies { packaging project(path: "${subproj.path}", configuration: 'zip') @@ -39,3 +56,20 @@ setupPackagingTest { expectedPlugins.setText(plugins.join('\n'), 'UTF-8') } } + +esvagrant { + testClass 'org.elasticsearch.packaging.PackagingTests' +} + +forbiddenApisMain { + signaturesURLs = [ + PrecommitTasks.getResource('/forbidden/jdk-signatures.txt') + ] +} + +// we don't have additional tests for the tests themselves +tasks.test.enabled = false + +// this project doesn't get published +tasks.dependencyLicenses.enabled = false +tasks.dependenciesInfo.enabled = false diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/PackagingTests.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/PackagingTests.java new file mode 100644 index 0000000000000..0b5e7a3b6e0d2 --- /dev/null +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/PackagingTests.java @@ -0,0 +1,31 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.packaging; + +import org.junit.Test; + +/** + * This class doesn't have any tests yet + */ +public class PackagingTests { + + @Test + public void testDummy() {} +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json index 5ef943eacba6c..f92421b79ae91 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json @@ -18,6 +18,10 @@ } }, "params": { + "copy_settings": { + "type" : "boolean", + "description" : "whether or not to copy settings from the source index (defaults to false)" + }, "timeout": { "type" : "time", "description" : "Explicit operation timeout" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json index a79fa7b708269..2c14fced28c36 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json @@ -18,6 +18,10 @@ } }, "params": { + "copy_settings": { + "type" : "boolean", + "description" : "whether or not to copy settings from the source index (defaults to false)" + }, "timeout": { "type" : "time", "description" : "Explicit operation timeout" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml index e3af21412ca7b..ae9637c08dd55 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/10_basic.yml @@ -2,7 +2,7 @@ "get cluster state": - do: cluster.state: {} - + - is_true: master_node --- @@ -18,3 +18,18 @@ - is_true: master_node - gte: { compressed_size_in_bytes: 50 } - is_true: compressed_size + +--- +"get cluster state returns cluster_uuid at the top level": + - skip: + version: " - 6.3.99" + reason: "cluster state including cluster_uuid at the top level is new in v6.4.0 and higher" + + - do: + cluster.state: + human: true + + - is_true: cluster_uuid + - is_true: master_node + - gte: { compressed_size_in_bytes: 50 } + - is_true: compressed_size diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml index 1e1d57125601c..880efaff19aa6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml @@ -156,3 +156,19 @@ setup: - is_true: routing_table.indices.index1 - is_true: metadata.indices.index2 - is_true: routing_table.indices.index2 + +--- +"Filtering the cluster state returns cluster_uuid at the top level regardless of metric filters": + - skip: + version: " - 6.3.99" + reason: "cluster state including cluster_uuid at the top level is new in v6.4.0 and higher" + + - do: + cluster.state: + metric: [ master_node, version, metadata ] + + - is_true: cluster_uuid + - is_true: master_node + - is_true: version + - is_true: state_uuid + - is_true: metadata diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml new file mode 100644 index 0000000000000..34757427e6983 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml @@ -0,0 +1,94 @@ +--- +"Copy settings during shrink index": + - skip: + version: " - 6.3.99" + reason: copy_settings did not exist prior to 6.4.0 + features: "warnings" + + - do: + cluster.state: {} + + # get master node id + - set: { master_node: master } + + - do: + indices.create: + index: source + wait_for_active_shards: 1 + body: + settings: + # ensure everything is allocated on the master node + index.routing.allocation.include._id: $master + index.number_of_replicas: 0 + index.merge.scheduler.max_merge_count: 4 + + # make it read-only + - do: + indices.put_settings: + index: source + body: + index.blocks.write: true + index.number_of_replicas: 0 + + - do: + cluster.health: + wait_for_status: green + index: source + + # now we do an actual shrink and copy settings + - do: + indices.shrink: + index: "source" + target: "copy-settings-target" + wait_for_active_shards: 1 + master_timeout: 10s + copy_settings: true + body: + settings: + index.number_of_replicas: 0 + index.merge.scheduler.max_thread_count: 2 + warnings: + - "parameter [copy_settings] is deprecated but was [true]" + + - do: + cluster.health: + wait_for_status: green + + - do: + indices.get_settings: + index: "copy-settings-target" + + # settings should be copied + - match: { copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" } + - match: { copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" } + - match: { copy-settings-target.settings.index.blocks.write: "true" } + - match: { copy-settings-target.settings.index.routing.allocation.include._id: $master } + + # now we do an actual shrink and do not copy settings + - do: + indices.shrink: + index: "source" + target: "no-copy-settings-target" + wait_for_active_shards: 1 + master_timeout: 10s + copy_settings: false + body: + settings: + index.number_of_replicas: 0 + index.merge.scheduler.max_thread_count: 2 + warnings: + - "parameter [copy_settings] is deprecated but was [false]" + + - do: + cluster.health: + wait_for_status: green + + - do: + indices.get_settings: + index: "no-copy-settings-target" + + # only the settings from the request should be applied + - is_false: no-copy-settings-target.settings.index.merge.scheduler.max_merge_count + - match: { no-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" } + - is_false: no-copy-settings-target.settings.index.blocks.write + - is_false: no-copy-settings-target.settings.index.routing.allocation.include._id diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml new file mode 100644 index 0000000000000..1d3e37aa7b05d --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml @@ -0,0 +1,98 @@ +--- +"Copy settings during split index": + - skip: + version: " - 6.3.99" + reason: copy_settings did not exist prior to 6.4.0 + features: "warnings" + + - do: + cluster.state: {} + + # get master node id + - set: { master_node: master } + + - do: + indices.create: + index: source + wait_for_active_shards: 1 + body: + settings: + # ensure everything is allocated on the master node + index.routing.allocation.include._id: $master + index.number_of_replicas: 0 +
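# a single source shard with four routing shards lets the test split the index into two shards below +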
index.number_of_shards: 1 + index.number_of_routing_shards: 4 + index.merge.scheduler.max_merge_count: 4 + + # make it read-only + - do: + indices.put_settings: + index: source + body: + index.blocks.write: true + index.number_of_replicas: 0 + + - do: + cluster.health: + wait_for_status: green + index: source + + # now we do an actual split and copy settings + - do: + indices.split: + index: "source" + target: "copy-settings-target" + wait_for_active_shards: 1 + master_timeout: 10s + copy_settings: true + body: + settings: + index.number_of_replicas: 0 + index.number_of_shards: 2 + index.merge.scheduler.max_thread_count: 2 + warnings: + - "parameter [copy_settings] is deprecated but was [true]" + + - do: + cluster.health: + wait_for_status: green + + - do: + indices.get_settings: + index: "copy-settings-target" + + # settings should be copied + - match: { copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" } + - match: { copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" } + - match: { copy-settings-target.settings.index.blocks.write: "true" } + - match: { copy-settings-target.settings.index.routing.allocation.include._id: $master } + + # now we do an actual split and do not copy settings + - do: + indices.split: + index: "source" + target: "no-copy-settings-target" + wait_for_active_shards: 1 + master_timeout: 10s + copy_settings: false + body: + settings: + index.number_of_replicas: 0 + index.number_of_shards: 2 + index.merge.scheduler.max_thread_count: 2 + warnings: + - "parameter [copy_settings] is deprecated but was [false]" + + - do: + cluster.health: + wait_for_status: green + + - do: + indices.get_settings: + index: "no-copy-settings-target" + + # only the settings from the request should be applied + - is_false: no-copy-settings-target.settings.index.merge.scheduler.max_merge_count + - match: { no-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" } + - is_false: no-copy-settings-target.settings.index.blocks.write + - is_false: no-copy-settings-target.settings.index.routing.allocation.include._id diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index e62e906d58287..2030960f354c2 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; @@ -635,8 +636,25 @@ public ElasticsearchException[] guessRootCauses() { public static ElasticsearchException[] guessRootCauses(Throwable t) { Throwable ex = ExceptionsHelper.unwrapCause(t); if (ex instanceof ElasticsearchException) { + // ElasticsearchException knows how to guess its own root cause return ((ElasticsearchException) ex).guessRootCauses(); } + if (ex instanceof XContentParseException) { + /* + * We'd like to unwrap parsing exceptions to the inner-most + * parsing exception because that is generally the most interesting + * exception to return to the user.
If that exception is caused by + * an ElasticsearchException we'd like to keep unwrapping because + * ElasticsearchExceptions tend to contain useful information for + * the user. + */ + Throwable cause = ex.getCause(); + if (cause != null) { + if (cause instanceof XContentParseException || cause instanceof ElasticsearchException) { + return guessRootCauses(ex.getCause()); + } + } + } return new ElasticsearchException[]{new ElasticsearchException(t.getMessage(), t) { @Override protected String getExceptionName() { diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchWrapperException.java b/server/src/main/java/org/elasticsearch/ElasticsearchWrapperException.java index 0b809e0923b62..7e0fd3a24cb09 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchWrapperException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchWrapperException.java @@ -19,7 +19,11 @@ package org.elasticsearch; +/** + * An exception that is meant to be "unwrapped" when sent back to the user + * as an error because its {@link #getCause() cause}, if non-null, is + * always more useful to the user than the exception itself. + */ public interface ElasticsearchWrapperException { - Throwable getCause(); } diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 60ba0a43396e4..392b307a8aa79 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -241,6 +241,7 @@ import org.elasticsearch.rest.action.admin.cluster.RestRestoreSnapshotAction; import org.elasticsearch.rest.action.admin.cluster.RestSnapshotsStatusAction; import org.elasticsearch.rest.action.admin.cluster.RestVerifyRepositoryAction; +import org.elasticsearch.rest.action.admin.indices.RestResizeHandler; import org.elasticsearch.rest.action.admin.indices.RestAnalyzeAction; import org.elasticsearch.rest.action.admin.indices.RestClearIndicesCacheAction; import org.elasticsearch.rest.action.admin.indices.RestCloseIndexAction; @@ -270,8 +271,6 @@ import org.elasticsearch.rest.action.admin.indices.RestRecoveryAction; import org.elasticsearch.rest.action.admin.indices.RestRefreshAction; import org.elasticsearch.rest.action.admin.indices.RestRolloverIndexAction; -import org.elasticsearch.rest.action.admin.indices.RestShrinkIndexAction; -import org.elasticsearch.rest.action.admin.indices.RestSplitIndexAction; import org.elasticsearch.rest.action.admin.indices.RestSyncedFlushAction; import org.elasticsearch.rest.action.admin.indices.RestUpdateSettingsAction; import org.elasticsearch.rest.action.admin.indices.RestUpgradeAction; @@ -569,8 +568,8 @@ public void initRestHandlers(Supplier<DiscoveryNodes> nodesInCluster) { registerHandler.accept(new RestIndexPutAliasAction(settings, restController)); registerHandler.accept(new RestIndicesAliasesAction(settings, restController)); registerHandler.accept(new RestCreateIndexAction(settings, restController)); - registerHandler.accept(new RestShrinkIndexAction(settings, restController)); - registerHandler.accept(new RestSplitIndexAction(settings, restController)); + registerHandler.accept(new RestResizeHandler.RestShrinkIndexAction(settings, restController)); + registerHandler.accept(new RestResizeHandler.RestSplitIndexAction(settings, restController)); registerHandler.accept(new RestRolloverIndexAction(settings, restController)); registerHandler.accept(new RestDeleteIndexAction(settings, restController)); registerHandler.accept(new
RestCloseIndexAction(settings, restController)); diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java index 69ba6db63ef07..ca97ef9c8a333 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -75,7 +75,7 @@ public enum Result implements Writeable { Result(int op) { this.op = (byte) op; - this.lowercase = this.toString().toLowerCase(Locale.ENGLISH); + this.lowercase = this.name().toLowerCase(Locale.ROOT); } public byte getOp() { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index 63b426ab324c4..dcf362446384c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -230,9 +230,9 @@ private SnapshotsStatusResponse buildResponse(SnapshotsStatusRequest request, Li SnapshotInfo snapshotInfo = snapshotsService.snapshot(repositoryName, snapshotId); List<SnapshotIndexShardStatus> shardStatusBuilder = new ArrayList<>(); if (snapshotInfo.state().completed()) { - Map<ShardId, IndexShardSnapshotStatus> shardStatues = - snapshotsService.snapshotShards(request.repository(), snapshotInfo); - for (Map.Entry<ShardId, IndexShardSnapshotStatus> shardStatus : shardStatues.entrySet()) { + Map<ShardId, IndexShardSnapshotStatus> shardStatuses = + snapshotsService.snapshotShards(repositoryName, repositoryData, snapshotInfo); + for (Map.Entry<ShardId, IndexShardSnapshotStatus> shardStatus : shardStatuses.entrySet()) { IndexShardSnapshotStatus.Copy lastSnapshotStatus = shardStatus.getValue().asCopy(); shardStatusBuilder.add(new SnapshotIndexShardStatus(shardStatus.getKey(), lastSnapshotStatus)); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java index 1734c340bd4ef..ce77790637c8d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java @@ -46,6 +46,7 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ private final boolean updateAllTypes; private Index recoverFrom; private ResizeType resizeType; + private boolean copySettings; private IndexMetaData.State state = IndexMetaData.State.OPEN; @@ -115,6 +116,11 @@ public CreateIndexClusterStateUpdateRequest resizeType(ResizeType resizeType) { return this; } + public CreateIndexClusterStateUpdateRequest copySettings(final boolean copySettings) { + this.copySettings = copySettings; + return this; + } + public TransportMessage originalMessage() { return originalMessage; } @@ -178,4 +184,9 @@ public ActiveShardCount waitForActiveShards() { public ResizeType resizeType() { return resizeType; } + + public boolean copySettings() { + return copySettings; + } + } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java index 79600674f4ade..f53b5437f03c2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.action.admin.indices.shrink; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.admin.indices.alias.Alias; @@ -55,6 +56,7 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements private CreateIndexRequest targetIndexRequest; private String sourceIndex; private ResizeType type = ResizeType.SHRINK; + private boolean copySettings = false; ResizeRequest() {} @@ -96,6 +98,11 @@ public void readFrom(StreamInput in) throws IOException { } else { type = ResizeType.SHRINK; // BWC this used to be shrink only } + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { + copySettings = in.readBoolean(); + } else { + copySettings = false; + } } @Override @@ -106,6 +113,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(ResizeAction.COMPATIBILITY_VERSION)) { out.writeEnum(type); } + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { + out.writeBoolean(copySettings); + } } @Override @@ -177,6 +187,14 @@ public ResizeType getResizeType() { return type; } + public void setCopySettings(final boolean copySettings) { + this.copySettings = copySettings; + } + + public boolean getCopySettings() { + return copySettings; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java index 606e6076d6133..af22f30091852 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java @@ -173,19 +173,19 @@ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final Resi settingsBuilder.put("index.number_of_shards", numShards); targetIndex.settings(settingsBuilder); - return new CreateIndexClusterStateUpdateRequest(targetIndex, - cause, targetIndex.index(), targetIndexName, true) - // mappings are updated on the node when creating in the shards, this prevents race-conditions since all mapping must be - // applied once we took the snapshot and if somebody messes things up and switches the index read/write and adds docs we miss - // the mappings for everything is corrupted and hard to debug - .ackTimeout(targetIndex.timeout()) - .masterNodeTimeout(targetIndex.masterNodeTimeout()) - .settings(targetIndex.settings()) - .aliases(targetIndex.aliases()) - .customs(targetIndex.customs()) - .waitForActiveShards(targetIndex.waitForActiveShards()) - .recoverFrom(metaData.getIndex()) - .resizeType(resizeRequest.getResizeType()); + return new CreateIndexClusterStateUpdateRequest(targetIndex, cause, targetIndex.index(), targetIndexName, true) + // mappings are updated on the node when the target shards are created; this prevents race conditions, since all mappings + // must be applied once we have taken the snapshot. If somebody switches the index to read/write and adds docs in the + // meantime, we would miss those mappings and everything would be corrupted and hard to debug + .ackTimeout(targetIndex.timeout()) + .masterNodeTimeout(targetIndex.masterNodeTimeout()) + .settings(targetIndex.settings()) +
.aliases(targetIndex.aliases()) + .customs(targetIndex.customs()) + .waitForActiveShards(targetIndex.waitForActiveShards()) + .recoverFrom(metaData.getIndex()) + .resizeType(resizeRequest.getResizeType()) + .copySettings(resizeRequest.getCopySettings()); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/bulk/MappingUpdatePerformer.java b/server/src/main/java/org/elasticsearch/action/bulk/MappingUpdatePerformer.java index 7f16b7c4d6d0b..1f228b0f355e0 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/MappingUpdatePerformer.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/MappingUpdatePerformer.java @@ -29,11 +29,4 @@ public interface MappingUpdatePerformer { */ void updateMappings(Mapping update, ShardId shardId, String type); - /** - * Throws a {@code ReplicationOperation.RetryOnPrimaryException} if the operation needs to be - * retried on the primary due to the mappings not being present yet, or a different exception if - * updating the mappings on the master failed. - */ - void verifyMappings(Mapping update, ShardId shardId); - } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 8fb490c4b6531..da3e883436a98 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -43,6 +43,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; @@ -64,7 +65,9 @@ import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; +import java.io.IOException; import java.util.Map; +import java.util.function.Function; import java.util.function.LongSupplier; /** Performs shard-level bulk (index, delete or update) operations */ @@ -137,12 +140,15 @@ private static BulkItemResultHolder executeIndexRequest(final IndexRequest index final IndexShard primary, final MappingUpdatePerformer mappingUpdater) throws Exception { Engine.IndexResult indexResult = executeIndexRequestOnPrimary(indexRequest, primary, mappingUpdater); - if (indexResult.hasFailure()) { - return new BulkItemResultHolder(null, indexResult, bulkItemRequest); - } else { - IndexResponse response = new IndexResponse(primary.shardId(), indexRequest.type(), indexRequest.id(), + switch (indexResult.getResultType()) { + case SUCCESS: + IndexResponse response = new IndexResponse(primary.shardId(), indexRequest.type(), indexRequest.id(), indexResult.getSeqNo(), primary.getPrimaryTerm(), indexResult.getVersion(), indexResult.isCreated()); - return new BulkItemResultHolder(response, indexResult, bulkItemRequest); + return new BulkItemResultHolder(response, indexResult, bulkItemRequest); + case FAILURE: + return new BulkItemResultHolder(null, indexResult, bulkItemRequest); + default: + throw new AssertionError("unknown result type for " + indexRequest + ": " + indexResult.getResultType()); } } @@ -151,19 +157,24 @@ private static BulkItemResultHolder executeDeleteRequest(final DeleteRequest del final IndexShard primary, final MappingUpdatePerformer mappingUpdater) throws Exception { 
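+ // mirror of the index path above: translate each Engine.Result.Type into a response; a delete must never require a mapping update, which the MAPPING_UPDATE_REQUIRED arm below treats as an assertion failure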
Engine.DeleteResult deleteResult = executeDeleteRequestOnPrimary(deleteRequest, primary, mappingUpdater); - if (deleteResult.hasFailure()) { - return new BulkItemResultHolder(null, deleteResult, bulkItemRequest); - } else { - DeleteResponse response = new DeleteResponse(primary.shardId(), deleteRequest.type(), deleteRequest.id(), + switch (deleteResult.getResultType()) { + case SUCCESS: + DeleteResponse response = new DeleteResponse(primary.shardId(), deleteRequest.type(), deleteRequest.id(), deleteResult.getSeqNo(), primary.getPrimaryTerm(), deleteResult.getVersion(), deleteResult.isFound()); - return new BulkItemResultHolder(response, deleteResult, bulkItemRequest); + return new BulkItemResultHolder(response, deleteResult, bulkItemRequest); + case FAILURE: + return new BulkItemResultHolder(null, deleteResult, bulkItemRequest); + case MAPPING_UPDATE_REQUIRED: + throw new AssertionError("delete operation leaked a mapping update " + deleteRequest); + default: + throw new AssertionError("unknown result type for " + deleteRequest + ": " + deleteResult.getResultType()); } } static Translog.Location calculateTranslogLocation(final Translog.Location originalLocation, final BulkItemResultHolder bulkItemResult) { final Engine.Result operationResult = bulkItemResult.operationResult; - if (operationResult != null && operationResult.hasFailure() == false) { + if (operationResult != null && operationResult.getResultType() == Engine.Result.Type.SUCCESS) { return locationToSync(originalLocation, operationResult.getTranslogLocation()); } else { return originalLocation; @@ -186,13 +197,13 @@ static BulkItemResponse createPrimaryResponse(BulkItemResultHolder bulkItemResul assert response.getResult() == DocWriteResponse.Result.NOOP : "only noop updates can have a null operation"; return new BulkItemResponse(replicaRequest.id(), opType, response); - } else if (operationResult.hasFailure() == false) { + } else if (operationResult.getResultType() == Engine.Result.Type.SUCCESS) { BulkItemResponse primaryResponse = new BulkItemResponse(replicaRequest.id(), opType, response); // set a blank ShardInfo so we can safely send it to the replicas. We won't use it in the real response though. 
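The file above now repeats a single idiom: the old boolean hasFailure() check becomes an exhaustive branch over Engine.Result.Type. A minimal sketch of that idiom, assuming illustrative placeholder handlers (handleSuccess, handleFailure and handleMappingUpdate are not part of the patch):

    // Exhaustive three-way branch; the default arm catches any Type value
    // added later without this caller being updated.
    switch (operationResult.getResultType()) {
        case SUCCESS:
            handleSuccess(operationResult.getTranslogLocation());
            break;
        case FAILURE:
            handleFailure(operationResult.getFailure());
            break;
        case MAPPING_UPDATE_REQUIRED:
            handleMappingUpdate(operationResult.getRequiredMappingUpdate());
            break;
        default:
            throw new AssertionError("unknown result type: " + operationResult.getResultType());
    }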
primaryResponse.getResponse().setShardInfo(new ShardInfo()); return primaryResponse; - } else { + } else if (operationResult.getResultType() == Engine.Result.Type.FAILURE) { DocWriteRequest docWriteRequest = replicaRequest.request(); Exception failure = operationResult.getFailure(); if (isConflictException(failure)) { @@ -217,6 +228,8 @@ static BulkItemResponse createPrimaryResponse(BulkItemResultHolder bulkItemResul assert replicaRequest.getPrimaryResponse() != null : "replica request must have a primary response"; return null; } + } else { + throw new AssertionError("unknown result type for " + request + ": " + operationResult.getResultType()); } } @@ -273,7 +286,7 @@ private static boolean isConflictException(final Exception e) { */ static BulkItemResultHolder processUpdateResponse(final UpdateRequest updateRequest, final String concreteIndex, final Engine.Result result, final UpdateHelper.Result translate, - final IndexShard primary, final int bulkReqId) throws Exception { + final IndexShard primary, final int bulkReqId) { assert result.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO : "failed result should not have a sequence number"; Engine.Operation.TYPE opType = result.getOperationType(); @@ -372,12 +385,14 @@ static BulkItemResultHolder executeUpdateRequestOnce(UpdateRequest updateRequest // this is a noop operation final UpdateResponse updateResponse = translate.action(); return new BulkItemResultHolder(updateResponse, result, primaryItemRequest); - } else if (result.hasFailure()) { + } else if (result.getResultType() == Engine.Result.Type.FAILURE) { // There was a result, and the result was a failure return new BulkItemResultHolder(null, result, primaryItemRequest); - } else { + } else if (result.getResultType() == Engine.Result.Type.SUCCESS) { // It was successful, we need to construct the response and return it return processUpdateResponse(updateRequest, concreteIndex, result, translate, primary, bulkReqId); + } else { + throw new AssertionError("unknown result type for " + updateRequest + ": " + result.getResultType()); } } @@ -505,6 +520,7 @@ public static Translog.Location performOnReplica(BulkShardRequest request, Index private static Engine.Result performOpOnReplica(DocWriteResponse primaryResponse, DocWriteRequest docWriteRequest, IndexShard replica) throws Exception { + final Engine.Result result; switch (docWriteRequest.opType()) { case CREATE: case INDEX: @@ -514,24 +530,24 @@ private static Engine.Result performOpOnReplica(DocWriteResponse primaryResponse SourceToParse.source(shardId.getIndexName(), indexRequest.type(), indexRequest.id(), indexRequest.source(), indexRequest.getContentType()) .routing(indexRequest.routing()).parent(indexRequest.parent()); - return replica.applyIndexOperationOnReplica(primaryResponse.getSeqNo(), primaryResponse.getVersion(), + result = replica.applyIndexOperationOnReplica(primaryResponse.getSeqNo(), primaryResponse.getVersion(), indexRequest.versionType().versionTypeForReplicationAndRecovery(), indexRequest.getAutoGeneratedTimestamp(), - indexRequest.isRetry(), sourceToParse, update -> { - throw new TransportReplicationAction.RetryOnReplicaException(replica.shardId(), - "Mappings are not available on the replica yet, triggered update: " + update); - }); + indexRequest.isRetry(), sourceToParse); + break; case DELETE: DeleteRequest deleteRequest = (DeleteRequest) docWriteRequest; - return replica.applyDeleteOperationOnReplica(primaryResponse.getSeqNo(), primaryResponse.getVersion(), - deleteRequest.type(), deleteRequest.id(), 
deleteRequest.versionType().versionTypeForReplicationAndRecovery(), - update -> { - throw new TransportReplicationAction.RetryOnReplicaException(replica.shardId(), - "Mappings are not available on the replica yet, triggered update: " + update); - }); + result = replica.applyDeleteOperationOnReplica(primaryResponse.getSeqNo(), primaryResponse.getVersion(), + deleteRequest.type(), deleteRequest.id(), deleteRequest.versionType().versionTypeForReplicationAndRecovery()); + break; default: throw new IllegalStateException("Unexpected request operation type on replica: " + docWriteRequest.opType().getLowercase()); } + if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) { + throw new TransportReplicationAction.RetryOnReplicaException(replica.shardId(), + "Mappings are not available on the replica yet, triggered update: " + result.getRequiredMappingUpdate()); + } + return result; } /** Executes index operation on primary shard after updates mapping if dynamic mappings are found */ @@ -540,50 +556,61 @@ static Engine.IndexResult executeIndexRequestOnPrimary(IndexRequest request, Ind final SourceToParse sourceToParse = SourceToParse.source(request.index(), request.type(), request.id(), request.source(), request.getContentType()) .routing(request.routing()).parent(request.parent()); - try { - // if a mapping update is required to index this request, issue a mapping update on the master, and abort the - // current indexing operation so that it can be retried with the updated mapping from the master - // The early abort uses the RetryOnPrimaryException, but any other exception would be fine as well. - return primary.applyIndexOperationOnPrimary(request.version(), request.versionType(), sourceToParse, - request.getAutoGeneratedTimestamp(), request.isRetry(), update -> { - mappingUpdater.updateMappings(update, primary.shardId(), sourceToParse.type()); - throw new ReplicationOperation.RetryOnPrimaryException(primary.shardId(), "Mapping updated"); - }); - } catch (ReplicationOperation.RetryOnPrimaryException e) { - return primary.applyIndexOperationOnPrimary(request.version(), request.versionType(), sourceToParse, - request.getAutoGeneratedTimestamp(), request.isRetry(), update -> mappingUpdater.verifyMappings(update, primary.shardId())); - } + return executeOnPrimaryWhileHandlingMappingUpdates(primary.shardId(), request.type(), + () -> + primary.applyIndexOperationOnPrimary(request.version(), request.versionType(), sourceToParse, + request.getAutoGeneratedTimestamp(), request.isRetry()), + e -> new Engine.IndexResult(e, request.version()), + mappingUpdater); } private static Engine.DeleteResult executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard primary, MappingUpdatePerformer mappingUpdater) throws Exception { - try { - return primary.applyDeleteOperationOnPrimary(request.version(), request.type(), request.id(), request.versionType(), - update -> { - mappingUpdater.updateMappings(update, primary.shardId(), request.type()); - throw new ReplicationOperation.RetryOnPrimaryException(primary.shardId(), "Mapping updated"); - }); - } catch (ReplicationOperation.RetryOnPrimaryException e) { - return primary.applyDeleteOperationOnPrimary(request.version(), request.type(), request.id(), request.versionType(), - update -> mappingUpdater.verifyMappings(update, primary.shardId())); - } + return executeOnPrimaryWhileHandlingMappingUpdates(primary.shardId(), request.type(), + () -> primary.applyDeleteOperationOnPrimary(request.version(), request.type(), request.id(), 
request.versionType()), + e -> new Engine.DeleteResult(e, request.version()), + mappingUpdater); } - class ConcreteMappingUpdatePerformer implements MappingUpdatePerformer { - - public void updateMappings(final Mapping update, final ShardId shardId, final String type) { - if (update != null) { - // can throw timeout exception when updating mappings or ISE for attempting to - // update default mappings which are bubbled up - mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), type, update); + private static <T extends Engine.Result> T executeOnPrimaryWhileHandlingMappingUpdates(ShardId shardId, String type, + CheckedSupplier<T, IOException> toExecute, + Function<Exception, T> onError, + MappingUpdatePerformer mappingUpdater) + throws IOException { + T result = toExecute.get(); + if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) { + // try to update the mappings and try again. + try { + mappingUpdater.updateMappings(result.getRequiredMappingUpdate(), shardId, type); + } catch (Exception e) { + // failure to update the mapping should translate to a failure of specific requests. Other requests + // still need to be executed and replicated. + return onError.apply(e); } - } - public void verifyMappings(final Mapping update, final ShardId shardId) { - if (update != null) { + result = toExecute.get(); + + if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) { + // double mapping update. We assume that the successful mapping update wasn't yet processed on the node + // and retry the entire request again. throw new ReplicationOperation.RetryOnPrimaryException(shardId, - "Dynamic mappings are not available on the node that holds the primary yet"); + "Dynamic mappings are not available on the node that holds the primary yet"); } } + assert result.getFailure() instanceof ReplicationOperation.RetryOnPrimaryException == false : + "IndexShard shouldn't use RetryOnPrimaryException. got " + result.getFailure(); + return result; + + } + + class ConcreteMappingUpdatePerformer implements MappingUpdatePerformer { + + public void updateMappings(final Mapping update, final ShardId shardId, final String type) { + assert update != null; + assert shardId != null; + // can throw timeout exception when updating mappings or ISE for attempting to + // update default mappings which are bubbled up + mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), type, update); + } } } diff --git a/server/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java b/server/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java index a7b9b06473243..14b7f65239ba1 100644 --- a/server/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java +++ b/server/src/main/java/org/elasticsearch/action/delete/DeleteResponse.java @@ -40,7 +40,16 @@ public DeleteResponse() { } public DeleteResponse(ShardId shardId, String type, String id, long seqNo, long primaryTerm, long version, boolean found) { - super(shardId, type, id, seqNo, primaryTerm, version, found ? Result.DELETED : Result.NOT_FOUND); + this(shardId, type, id, seqNo, primaryTerm, version, found ? 
Result.DELETED : Result.NOT_FOUND); + } + + private DeleteResponse(ShardId shardId, String type, String id, long seqNo, long primaryTerm, long version, Result result) { + super(shardId, type, id, seqNo, primaryTerm, version, assertDeletedOrNotFound(result)); + } + + private static Result assertDeletedOrNotFound(Result result) { + assert result == Result.DELETED || result == Result.NOT_FOUND; + return result; } @Override @@ -87,8 +96,7 @@ public static class Builder extends DocWriteResponse.Builder { @Override public DeleteResponse build() { - DeleteResponse deleteResponse = new DeleteResponse(shardId, type, id, seqNo, primaryTerm, version, - result == Result.DELETED ? true : false); + DeleteResponse deleteResponse = new DeleteResponse(shardId, type, id, seqNo, primaryTerm, version, result); deleteResponse.setForcedRefresh(forcedRefresh); if (shardInfo != null) { deleteResponse.setShardInfo(shardInfo); diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java b/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java index 743086165f689..3174e4d8ab187 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java @@ -41,7 +41,16 @@ public IndexResponse() { } public IndexResponse(ShardId shardId, String type, String id, long seqNo, long primaryTerm, long version, boolean created) { - super(shardId, type, id, seqNo, primaryTerm, version, created ? Result.CREATED : Result.UPDATED); + this(shardId, type, id, seqNo, primaryTerm, version, created ? Result.CREATED : Result.UPDATED); + } + + private IndexResponse(ShardId shardId, String type, String id, long seqNo, long primaryTerm, long version, Result result) { + super(shardId, type, id, seqNo, primaryTerm, version, assertCreatedOrUpdated(result)); + } + + private static Result assertCreatedOrUpdated(Result result) { + assert result == Result.CREATED || result == Result.UPDATED; + return result; } @Override @@ -87,11 +96,9 @@ public static void parseXContentFields(XContentParser parser, Builder context) t * instantiate the {@link IndexResponse}. */ public static class Builder extends DocWriteResponse.Builder { - @Override public IndexResponse build() { - IndexResponse indexResponse = new IndexResponse(shardId, type, id, seqNo, primaryTerm, version, - result == Result.CREATED ? 
true : false); + IndexResponse indexResponse = new IndexResponse(shardId, type, id, seqNo, primaryTerm, version, result); indexResponse.setForcedRefresh(forcedRefresh); if (shardInfo != null) { indexResponse.setShardInfo(shardInfo); diff --git a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java index c182fb24ffb11..3dd2bd4df580f 100644 --- a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java @@ -122,11 +122,11 @@ public static Translog.Location performOnReplica(ResyncReplicationRequest reques Translog.Location location = null; for (Translog.Operation operation : request.getOperations()) { try { - final Engine.Result operationResult = replica.applyTranslogOperation(operation, Engine.Operation.Origin.REPLICA, - update -> { - throw new TransportReplicationAction.RetryOnReplicaException(replica.shardId(), - "Mappings are not available on the replica yet, triggered update: " + update); - }); + final Engine.Result operationResult = replica.applyTranslogOperation(operation, Engine.Operation.Origin.REPLICA); + if (operationResult.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) { + throw new TransportReplicationAction.RetryOnReplicaException(replica.shardId(), + "Mappings are not available on the replica yet, triggered update: " + operationResult.getRequiredMappingUpdate()); + } location = syncOperationResultOrThrow(operationResult, location); } catch (Exception e) { // if its not a failure to be ignored, let it bubble up diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index 2a3e8be7aa8bb..b14fd156b735d 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -33,23 +33,17 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.Mapping; -import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; -import java.io.IOException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -78,7 +72,7 @@ protected TransportWriteAction(Settings settings, String actionName, TransportSe protected static Location syncOperationResultOrThrow(final Engine.Result operationResult, final Location currentLocation) throws Exception { final Location location; - if 
(operationResult.hasFailure()) { + if (operationResult.getFailure() != null) { // check if any transient write operation failures should be bubbled up Exception failure = operationResult.getFailure(); assert failure instanceof MapperParsingException : "expected mapper parsing failures. got " + failure; @@ -384,7 +378,9 @@ class WriteActionReplicasProxy extends ReplicasProxy { @Override public void failShardIfNeeded(ShardRouting replica, String message, Exception exception, Runnable onSuccess, Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure) { - logger.warn(new ParameterizedMessage("[{}] {}", replica.shardId(), message), exception); + if (TransportActions.isShardNotAvailableException(exception) == false) { + logger.warn(new ParameterizedMessage("[{}] {}", replica.shardId(), message), exception); + } shardStateAction.remoteShardFailed(replica.shardId(), replica.allocationId().getId(), primaryTerm, true, message, exception, createShardActionListener(onSuccess, onPrimaryDemoted, onIgnoredFailure)); } diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index 30c8df07ec1a5..2b991d1dc611a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -326,6 +326,9 @@ public String toString() { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { EnumSet<Metric> metrics = Metric.parseString(params.param("metric", "_all"), true); + // always provide the cluster_uuid as part of the top-level response (also part of the metadata response) + builder.field("cluster_uuid", metaData().clusterUUID()); + if (metrics.contains(Metric.VERSION)) { builder.field("version", version); builder.field("state_uuid", stateUUID); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 4f5dade2219df..be7b0b483182c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -61,6 +61,7 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentHelper; @@ -227,9 +228,19 @@ private void onlyCreateIndex(final CreateIndexClusterStateUpdateRequest request, Settings build = updatedSettingsBuilder.put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX).build(); indexScopedSettings.validate(build, true); // we do validate here - index setting must be consistent request.settings(build); - clusterService.submitStateUpdateTask("create-index [" + request.index() + "], cause [" + request.cause() + "]", - new IndexCreationTask(logger, allocationService, request, listener, indicesService, aliasValidator, xContentRegistry, settings, - this::validate)); + clusterService.submitStateUpdateTask( + "create-index [" + request.index() + "], cause [" + request.cause() + "]", + new IndexCreationTask( + logger, + allocationService, + request, + listener, + indicesService, + aliasValidator, + xContentRegistry, + settings, + 
this::validate, + indexScopedSettings)); } interface IndexValidator { @@ -246,11 +257,12 @@ static class IndexCreationTask extends AckedClusterStateUpdateTask listener, IndicesService indicesService, AliasValidator aliasValidator, NamedXContentRegistry xContentRegistry, - Settings settings, IndexValidator validator) { + Settings settings, IndexValidator validator, IndexScopedSettings indexScopedSettings) { super(Priority.URGENT, request, listener); this.request = request; this.logger = logger; @@ -260,6 +272,7 @@ static class IndexCreationTask extends AckedClusterStateUpdateTask - List<IndexTemplateMetaData> templates = MetaDataIndexTemplateService.findTemplates(currentState.metaData(), request.index()); + List<IndexTemplateMetaData> templates = + MetaDataIndexTemplateService.findTemplates(currentState.metaData(), request.index()); Map<String, Custom> customs = new HashMap<>(); @@ -401,7 +415,14 @@ public ClusterState execute(ClusterState currentState) throws Exception { if (recoverFromIndex != null) { assert request.resizeType() != null; prepareResizeIndexSettings( - currentState, mappings.keySet(), indexSettingsBuilder, recoverFromIndex, request.index(), request.resizeType()); + currentState, + mappings.keySet(), + indexSettingsBuilder, + recoverFromIndex, + request.index(), + request.resizeType(), + request.copySettings(), + indexScopedSettings); } final Settings actualIndexSettings = indexSettingsBuilder.build(); tmpImdBuilder.settings(actualIndexSettings); @@ -671,8 +692,15 @@ static IndexMetaData validateResize(ClusterState state, String sourceIndex, return sourceMetaData; } - static void prepareResizeIndexSettings(ClusterState currentState, Set<String> mappingKeys, Settings.Builder indexSettingsBuilder, - Index resizeSourceIndex, String resizeIntoName, ResizeType type) { + static void prepareResizeIndexSettings( + final ClusterState currentState, + final Set<String> mappingKeys, + final Settings.Builder indexSettingsBuilder, + final Index resizeSourceIndex, + final String resizeIntoName, + final ResizeType type, + final boolean copySettings, + final IndexScopedSettings indexScopedSettings) { final IndexMetaData sourceMetaData = currentState.metaData().index(resizeSourceIndex.getName()); if (type == ResizeType.SHRINK) { final List<String> nodesToAllocateOn = validateShrinkIndex(currentState, resizeSourceIndex.getName(), @@ -693,14 +721,37 @@ static void prepareResizeIndexSettings(ClusterState currentState, Set<String> ma throw new IllegalStateException("unknown resize type is " + type); } - final Predicate<String> sourceSettingsPredicate = (s) -> s.startsWith("index.similarity.") - || s.startsWith("index.analysis.") || s.startsWith("index.sort.") || s.equals("index.mapping.single_type"); + final Settings.Builder builder = Settings.builder(); + if (copySettings) { + // copy all settings, except non-copyable settings and settings that have already been set (e.g., from the request) + for (final String key : sourceMetaData.getSettings().keySet()) { + final Setting<?> setting = indexScopedSettings.get(key); + if (setting == null) { + assert indexScopedSettings.isPrivateSetting(key) : key; + } else if (setting.getProperties().contains(Setting.Property.NotCopyableOnResize)) { + continue; + } + // do not override settings that have already been set (for example, from the request) + if (indexSettingsBuilder.keys().contains(key)) { + continue; + } + builder.copy(key, sourceMetaData.getSettings()); + } + } else { + final Predicate<String> sourceSettingsPredicate = + (s) -> ( + s.startsWith("index.similarity.") + || s.startsWith("index.analysis.") + || s.startsWith("index.sort.") + || s.equals("index.mapping.single_type")) + && indexSettingsBuilder.keys().contains(s) == false; + builder.put(sourceMetaData.getSettings().filter(sourceSettingsPredicate)); + } + indexSettingsBuilder - // now copy all similarity / analysis / sort / single_type settings - this overrides all settings from the user unless they - // wanna add extra settings .put(IndexMetaData.SETTING_VERSION_CREATED, sourceMetaData.getCreationVersion()) .put(IndexMetaData.SETTING_VERSION_UPGRADED, sourceMetaData.getUpgradedVersion()) - .put(sourceMetaData.getSettings().filter(sourceSettingsPredicate)) + .put(builder.build()) .put(IndexMetaData.SETTING_ROUTING_PARTITION_SIZE, sourceMetaData.getRoutingPartitionSize()) .put(IndexMetaData.INDEX_RESIZE_SOURCE_NAME.getKey(), resizeSourceIndex.getName()) .put(IndexMetaData.INDEX_RESIZE_SOURCE_UUID.getKey(), resizeSourceIndex.getUUID()); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java index 6aa2d83fa8d47..7a0b92858966f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java @@ -50,8 +50,8 @@ public class RepositoriesMetaData extends AbstractNamedDiffable<Custom> implemen * * @param repositories list of repositories */ - public RepositoriesMetaData(RepositoryMetaData... repositories) { - this.repositories = Arrays.asList(repositories); + public RepositoriesMetaData(List<RepositoryMetaData> repositories) { + this.repositories = repositories; } /** @@ -164,7 +164,7 @@ public static RepositoriesMetaData fromXContent(XContentParser parser) throws IO throw new ElasticsearchParseException("failed to parse repositories"); } } - return new RepositoriesMetaData(repository.toArray(new RepositoryMetaData[repository.size()])); + return new RepositoriesMetaData(repository); } /** diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index da5c49de7316d..07ef2068ad633 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -44,6 +44,7 @@ import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java index 9d4ee53aa1aa9..d9e42a67671c6 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -114,7 +114,13 @@ public enum Property { /** * Index scope */ - IndexScope + IndexScope, + + /** + * Mark this setting as not copyable during an index resize (shrink or split). This property can only be applied to settings that + * also have {@link Property#IndexScope}. 
+ */ + NotCopyableOnResize } private final Key key; @@ -142,10 +148,15 @@ private Setting(Key key, @Nullable Setting<T> fallbackSetting, Function<Setting + final EnumSet<Property> propertiesAsSet = EnumSet.copyOf(Arrays.asList(properties)); + if (propertiesAsSet.contains(Property.Dynamic) && propertiesAsSet.contains(Property.Final)) { throw new IllegalArgumentException("final setting [" + key + "] cannot be dynamic"); } + if (propertiesAsSet.contains(Property.NotCopyableOnResize) && propertiesAsSet.contains(Property.IndexScope) == false) { + throw new IllegalArgumentException( + "non-index-scoped setting [" + key + "] can not have property [" + Property.NotCopyableOnResize + "]"); + } + this.properties = propertiesAsSet; } } diff --git a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java index b75cda5b6ca70..8293f873c65eb 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java +++ b/server/src/main/java/org/elasticsearch/index/IndexingSlowLog.java @@ -144,7 +144,7 @@ private void setReformat(boolean reformat) { @Override public void postIndex(ShardId shardId, Engine.Index indexOperation, Engine.IndexResult result) { - if (result.hasFailure() == false) { + if (result.getResultType() == Engine.Result.Type.SUCCESS) { final ParsedDocument doc = indexOperation.parsedDoc(); final long tookInNanos = result.getTook(); if (indexWarnThreshold >= 0 && tookInNanos > indexWarnThreshold) { diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 2eaac603ba753..508b718033e3e 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -58,6 +58,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.merge.MergeStats; @@ -295,27 +296,45 @@ public Condition newCondition() { **/ public abstract static class Result { private final Operation.TYPE operationType; + private final Result.Type resultType; private final long version; private final long seqNo; private final Exception failure; private final SetOnce<Boolean> freeze = new SetOnce<>(); + private final Mapping requiredMappingUpdate; private Translog.Location translogLocation; private long took; protected Result(Operation.TYPE operationType, Exception failure, long version, long seqNo) { this.operationType = operationType; - this.failure = failure; + this.failure = Objects.requireNonNull(failure); this.version = version; this.seqNo = seqNo; + this.requiredMappingUpdate = null; + this.resultType = Type.FAILURE; } protected Result(Operation.TYPE operationType, long version, long seqNo) { - this(operationType, null, version, seqNo); + this.operationType = operationType; + this.version = version; + this.seqNo = seqNo; + this.failure = null; + this.requiredMappingUpdate = null; + this.resultType = Type.SUCCESS; + } + + protected Result(Operation.TYPE operationType, Mapping requiredMappingUpdate) { + this.operationType = operationType; + this.version = Versions.NOT_FOUND; + this.seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; + this.failure = null; + this.requiredMappingUpdate = requiredMappingUpdate; + this.resultType = Type.MAPPING_UPDATE_REQUIRED; } - /** whether the operation had failure */ - public boolean hasFailure() { - return failure != null; + /** whether the operation was successful, has failed, or was aborted due to a required mapping update */ + public Type getResultType() { + return resultType; } /** get the updated document version */ @@ -332,6 +351,14 @@ public long getSeqNo() { return seqNo; } + /** + * If the operation was aborted due to missing mappings, this method will return the mappings + * that are required to complete the operation. + */ + public Mapping getRequiredMappingUpdate() { + return requiredMappingUpdate; + } + /** get the translog location after executing the operation */ public Translog.Location getTranslogLocation() { return translogLocation; @@ -371,6 +398,11 @@ void freeze() { freeze.set(true); } + public enum Type { + SUCCESS, + FAILURE, + MAPPING_UPDATE_REQUIRED + } } public static class IndexResult extends Result { @@ -383,9 +415,8 @@ public IndexResult(long version, long seqNo, boolean created) { } /** - * use in case of index operation failed before getting to internal engine - * (e.g while preparing operation or updating mappings) - * */ + * use in case the index operation failed before getting to the internal engine + **/ public IndexResult(Exception failure, long version) { this(failure, version, SequenceNumbers.UNASSIGNED_SEQ_NO); } @@ -395,6 +426,11 @@ public IndexResult(Exception failure, long version, long seqNo) { this.created = false; } + public IndexResult(Mapping requiredMappingUpdate) { + super(Operation.TYPE.INDEX, requiredMappingUpdate); + this.created = false; + } + public boolean isCreated() { return created; } @@ -410,11 +446,23 @@ public DeleteResult(long version, long seqNo, boolean found) { this.found = found; } + /** + * use in case the delete operation failed before getting to the internal engine + **/ + public DeleteResult(Exception failure, long version) { + this(failure, version, SequenceNumbers.UNASSIGNED_SEQ_NO, false); + } + public DeleteResult(Exception failure, long version, long seqNo, boolean found) { super(Operation.TYPE.DELETE, failure, version, seqNo); this.found = found; } + public DeleteResult(Mapping requiredMappingUpdate) { + super(Operation.TYPE.DELETE, requiredMappingUpdate); + this.found = false; + } + public boolean isFound() { return found; } diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 4baa09a1422de..8ee41df88113c 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -803,7 +803,7 @@ public IndexResult index(Index index) throws IOException { final IndexResult indexResult; if (plan.earlyResultOnPreFlightError.isPresent()) { indexResult = plan.earlyResultOnPreFlightError.get(); - assert indexResult.hasFailure(); + assert indexResult.getResultType() == Result.Type.FAILURE : indexResult.getResultType(); } else if (plan.indexIntoLucene) { indexResult = indexIntoLucene(index, plan); } else { @@ -812,7 +812,7 @@ public IndexResult index(Index index) throws IOException { } if (index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { final Translog.Location location; - if (indexResult.hasFailure() == false) { + if (indexResult.getResultType() == Result.Type.SUCCESS) { location = translog.add(new Translog.Index(index, indexResult)); } else if (indexResult.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { // 
if we have document failure, record it as a no-op in the translog with the generated seq_no @@ -822,7 +822,7 @@ public IndexResult index(Index index) throws IOException { } indexResult.setTranslogLocation(location); } - if (plan.indexIntoLucene && indexResult.hasFailure() == false) { + if (plan.indexIntoLucene && indexResult.getResultType() == Result.Type.SUCCESS) { final Translog.Location translogLocation = trackTranslogLocation.get() ? indexResult.getTranslogLocation() : null; versionMap.maybePutIndexUnderLock(index.uid().bytes(), new IndexVersionValue(translogLocation, plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm())); @@ -1136,7 +1136,7 @@ public DeleteResult delete(Delete delete) throws IOException { } if (delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { final Translog.Location location; - if (deleteResult.hasFailure() == false) { + if (deleteResult.getResultType() == Result.Type.SUCCESS) { location = translog.add(new Translog.Delete(delete, deleteResult)); } else if (deleteResult.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { location = translog.add(new Translog.NoOp(deleteResult.getSeqNo(), diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index eff065888a896..7de8668908fe0 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -653,22 +653,21 @@ private IndexShardState changeState(IndexShardState newState, String reason) { } public Engine.IndexResult applyIndexOperationOnPrimary(long version, VersionType versionType, SourceToParse sourceToParse, - long autoGeneratedTimestamp, boolean isRetry, - Consumer<Mapping> onMappingUpdate) throws IOException { + long autoGeneratedTimestamp, boolean isRetry) throws IOException { return applyIndexOperation(SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm, version, versionType, autoGeneratedTimestamp, - isRetry, Engine.Operation.Origin.PRIMARY, sourceToParse, onMappingUpdate); + isRetry, Engine.Operation.Origin.PRIMARY, sourceToParse); } public Engine.IndexResult applyIndexOperationOnReplica(long seqNo, long version, VersionType versionType, - long autoGeneratedTimeStamp, boolean isRetry, SourceToParse sourceToParse, - Consumer<Mapping> onMappingUpdate) throws IOException { + long autoGeneratedTimeStamp, boolean isRetry, SourceToParse sourceToParse) + throws IOException { return applyIndexOperation(seqNo, primaryTerm, version, versionType, autoGeneratedTimeStamp, isRetry, - Engine.Operation.Origin.REPLICA, sourceToParse, onMappingUpdate); + Engine.Operation.Origin.REPLICA, sourceToParse); } private Engine.IndexResult applyIndexOperation(long seqNo, long opPrimaryTerm, long version, VersionType versionType, long autoGeneratedTimeStamp, boolean isRetry, Engine.Operation.Origin origin, - SourceToParse sourceToParse, Consumer<Mapping> onMappingUpdate) throws IOException { + SourceToParse sourceToParse) throws IOException { assert opPrimaryTerm <= this.primaryTerm : "op term [ " + opPrimaryTerm + " ] > shard term [" + this.primaryTerm + "]"; assert versionType.validateVersionForWrites(version); ensureWriteAllowed(origin); @@ -679,14 +678,15 @@ private Engine.IndexResult applyIndexOperation(long seqNo, long opPrimaryTerm, l autoGeneratedTimeStamp, isRetry); Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); if (update != null) { - // wrap this in the outer catch block, as the master might also throw a MapperParsingException when 
updating the mapping - onMappingUpdate.accept(update); + return new Engine.IndexResult(update); } - } catch (MapperParsingException | IllegalArgumentException | TypeMissingException e) { - return new Engine.IndexResult(e, version, seqNo); } catch (Exception e) { + // We treat any exception during parsing and/or mapping update as a document-level failure, + // with the exception of side effects that close the shard (hence the verifyNotClosed call). + // Returning a failed result instead of throwing ensures that we do not block the replication + // of previous operations to the replicas verifyNotClosed(e); - throw e; + return new Engine.IndexResult(e, version, seqNo); } return index(getEngine(), operation); @@ -750,21 +750,19 @@ private Engine.NoOpResult noOp(Engine engine, Engine.NoOp noOp) { return engine.noOp(noOp); } - public Engine.DeleteResult applyDeleteOperationOnPrimary(long version, String type, String id, VersionType versionType, - Consumer<Mapping> onMappingUpdate) throws IOException { + public Engine.DeleteResult applyDeleteOperationOnPrimary(long version, String type, String id, VersionType versionType) + throws IOException { return applyDeleteOperation(SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm, version, type, id, versionType, - Engine.Operation.Origin.PRIMARY, onMappingUpdate); + Engine.Operation.Origin.PRIMARY); } public Engine.DeleteResult applyDeleteOperationOnReplica(long seqNo, long version, String type, String id, - VersionType versionType, - Consumer<Mapping> onMappingUpdate) throws IOException { - return applyDeleteOperation(seqNo, primaryTerm, version, type, id, versionType, Engine.Operation.Origin.REPLICA, onMappingUpdate); + VersionType versionType) throws IOException { + return applyDeleteOperation(seqNo, primaryTerm, version, type, id, versionType, Engine.Operation.Origin.REPLICA); } private Engine.DeleteResult applyDeleteOperation(long seqNo, long opPrimaryTerm, long version, String type, String id, - VersionType versionType, Engine.Operation.Origin origin, - Consumer<Mapping> onMappingUpdate) throws IOException { + VersionType versionType, Engine.Operation.Origin origin) throws IOException { assert opPrimaryTerm <= this.primaryTerm : "op term [ " + opPrimaryTerm + " ] > shard term [" + this.primaryTerm + "]"; assert versionType.validateVersionForWrites(version); ensureWriteAllowed(origin); @@ -776,10 +774,10 @@ private Engine.DeleteResult applyDeleteOperation(long seqNo, long opPrimaryTerm, // would fail if bar#1 is indexed with a lower version than foo#1 was deleted with. // In order to work around this issue, we make deletions create types. This way, we // fail if index and delete operations do not use the same type. 
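A missing dynamic mapping is now reported back as a result instead of being thrown through a consumer callback. A condensed sketch of the resulting caller-side contract, assuming the surrounding locals (version, type, id, versionType) plus the primary shard and mappingUpdater from the bulk action above:

    // First attempt; a required mapping update no longer throws, it comes back as a result.
    Engine.DeleteResult result = primary.applyDeleteOperationOnPrimary(version, type, id, versionType);
    if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) {
        // push the dynamic mapping update to the master, then retry exactly once
        mappingUpdater.updateMappings(result.getRequiredMappingUpdate(), primary.shardId(), type);
        result = primary.applyDeleteOperationOnPrimary(version, type, id, versionType);
    }

If the retry still reports MAPPING_UPDATE_REQUIRED, the caller escalates to ReplicationOperation.RetryOnPrimaryException, exactly as executeOnPrimaryWhileHandlingMappingUpdates does above.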
- try { + try { Mapping update = docMapper(type).getMapping(); if (update != null) { - onMappingUpdate.accept(update); + return new Engine.DeleteResult(update); } } catch (MapperParsingException | IllegalArgumentException | TypeMissingException e) { return new Engine.DeleteResult(e, version, seqNo, false); } @@ -1227,8 +1225,7 @@ public void prepareForIndexRecovery() { assert currentEngineReference.get() == null; } - public Engine.Result applyTranslogOperation(Translog.Operation operation, Engine.Operation.Origin origin, - Consumer<Mapping> onMappingUpdate) throws IOException { + public Engine.Result applyTranslogOperation(Translog.Operation operation, Engine.Operation.Origin origin) throws IOException { final Engine.Result result; switch (operation.opType()) { case INDEX: @@ -1238,13 +1235,12 @@ public Engine.Result applyTranslogOperation(Translog.Operation operation, Engine result = applyIndexOperation(index.seqNo(), index.primaryTerm(), index.version(), index.versionType().versionTypeForReplicationAndRecovery(), index.getAutoGeneratedIdTimestamp(), true, origin, source(shardId.getIndexName(), index.type(), index.id(), index.source(), - XContentHelper.xContentType(index.source())) - .routing(index.routing()).parent(index.parent()), onMappingUpdate); + XContentHelper.xContentType(index.source())).routing(index.routing()).parent(index.parent())); break; case DELETE: final Translog.Delete delete = (Translog.Delete) operation; result = applyDeleteOperation(delete.seqNo(), delete.primaryTerm(), delete.version(), delete.type(), delete.id(), - delete.versionType().versionTypeForReplicationAndRecovery(), origin, onMappingUpdate); + delete.versionType().versionTypeForReplicationAndRecovery(), origin); break; case NO_OP: final Translog.NoOp noOp = (Translog.NoOp) operation; @@ -1265,10 +1261,18 @@ int runTranslogRecovery(Engine engine, Translog.Snapshot snapshot) throws IOExce while ((operation = snapshot.next()) != null) { try { logger.trace("[translog] recover op {}", operation); - Engine.Result result = applyTranslogOperation(operation, Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY, update -> { - throw new IllegalArgumentException("unexpected mapping update: " + update); - }); - ExceptionsHelper.reThrowIfNotNull(result.getFailure()); + Engine.Result result = applyTranslogOperation(operation, Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY); + switch (result.getResultType()) { + case FAILURE: + throw result.getFailure(); + case MAPPING_UPDATE_REQUIRED: + throw new IllegalArgumentException("unexpected mapping update: " + result.getRequiredMappingUpdate()); + case SUCCESS: + break; + default: + throw new AssertionError("Unknown result type [" + result.getResultType() + "]"); + } + opsRecovered++; recoveryState.getTranslog().incrementRecoveredOperations(); } catch (Exception e) { @@ -1276,7 +1280,7 @@ int runTranslogRecovery(Engine engine, Translog.Snapshot snapshot) throws IOExce // mainly for MapperParsingException and Failure to detect xcontent logger.info("ignoring recovery of a corrupt translog entry", e); } else { - throw e; + throw ExceptionsHelper.convertToRuntime(e); } } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/InternalIndexingStats.java b/server/src/main/java/org/elasticsearch/index/shard/InternalIndexingStats.java index ada869a1d9c0b..e7b7b719aed6d 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/InternalIndexingStats.java +++ b/server/src/main/java/org/elasticsearch/index/shard/InternalIndexingStats.java @@ -66,7 +66,7 @@ IndexingStats stats(boolean 
isThrottled, long currentThrottleInMillis, String... @Override public Engine.Index preIndex(ShardId shardId, Engine.Index operation) { - if (!operation.origin().isRecovery()) { + if (operation.origin().isRecovery() == false) { totalStats.indexCurrent.inc(); typeStats(operation.type()).indexCurrent.inc(); } @@ -75,17 +75,22 @@ public Engine.Index preIndex(ShardId shardId, Engine.Index operation) { @Override public void postIndex(ShardId shardId, Engine.Index index, Engine.IndexResult result) { - if (result.hasFailure() == false) { - if (!index.origin().isRecovery()) { - long took = result.getTook(); - totalStats.indexMetric.inc(took); - totalStats.indexCurrent.dec(); - StatsHolder typeStats = typeStats(index.type()); - typeStats.indexMetric.inc(took); - typeStats.indexCurrent.dec(); - } - } else { - postIndex(shardId, index, result.getFailure()); + switch (result.getResultType()) { + case SUCCESS: + if (index.origin().isRecovery() == false) { + long took = result.getTook(); + totalStats.indexMetric.inc(took); + totalStats.indexCurrent.dec(); + StatsHolder typeStats = typeStats(index.type()); + typeStats.indexMetric.inc(took); + typeStats.indexCurrent.dec(); + } + break; + case FAILURE: + postIndex(shardId, index, result.getFailure()); + break; + default: + throw new IllegalArgumentException("unknown result type: " + result.getResultType()); } } @@ -111,17 +116,22 @@ public Engine.Delete preDelete(ShardId shardId, Engine.Delete delete) { @Override public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResult result) { - if (result.hasFailure() == false) { - if (!delete.origin().isRecovery()) { - long took = result.getTook(); - totalStats.deleteMetric.inc(took); - totalStats.deleteCurrent.dec(); - StatsHolder typeStats = typeStats(delete.type()); - typeStats.deleteMetric.inc(took); - typeStats.deleteCurrent.dec(); - } - } else { - postDelete(shardId, delete, result.getFailure()); + switch (result.getResultType()) { + case SUCCESS: + if (!delete.origin().isRecovery()) { + long took = result.getTook(); + totalStats.deleteMetric.inc(took); + totalStats.deleteCurrent.dec(); + StatsHolder typeStats = typeStats(delete.type()); + typeStats.deleteMetric.inc(took); + typeStats.deleteCurrent.dec(); + } + break; + case FAILURE: + postDelete(shardId, delete, result.getFailure()); + break; + default: + throw new IllegalArgumentException("unknown result type: " + result.getResultType()); } } diff --git a/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java b/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java index d8e2ec5354764..ac5a5047464bf 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java +++ b/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java @@ -34,8 +34,8 @@ import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.Scheduler.Cancellable; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool.Names; import java.io.Closeable; @@ -210,7 +210,7 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResul /** called by IndexShard to record estimated bytes written to translog for the operation */ private void recordOperationBytes(Engine.Operation operation, Engine.Result result) { - if (result.hasFailure() == false) { + 
if (result.getResultType() == Engine.Result.Type.SUCCESS) { statusChecker.bytesWritten(operation.estimatedSizeInBytes()); } } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 63b68aa8b132a..e2004eda17fc1 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -394,10 +394,11 @@ public long indexTranslogOperations(List<Translog.Operation> operations, int tot throw new IndexShardNotRecoveringException(shardId, indexShard().state()); } for (Translog.Operation operation : operations) { - Engine.Result result = indexShard().applyTranslogOperation(operation, Engine.Operation.Origin.PEER_RECOVERY, update -> { + Engine.Result result = indexShard().applyTranslogOperation(operation, Engine.Operation.Origin.PEER_RECOVERY); + if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) { throw new MapperException("mapping updates are not allowed [" + operation + "]"); - }); - assert result.hasFailure() == false : "unexpected failure while replicating translog entry: " + result.getFailure(); + } + assert result.getFailure() == null : "unexpected failure while replicating translog entry: " + result.getFailure(); ExceptionsHelper.reThrowIfNotNull(result.getFailure()); } // update stats only after all operations completed (to ensure that mapping updates don't mess with stats) diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index 577ccc78de7b8..636e108468e82 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -114,7 +114,8 @@ public ClusterState execute(ClusterState currentState) throws IOException { RepositoriesMetaData repositories = metaData.custom(RepositoriesMetaData.TYPE); if (repositories == null) { logger.info("put repository [{}]", request.name); - repositories = new RepositoriesMetaData(new RepositoryMetaData(request.name, request.type, request.settings)); + repositories = new RepositoriesMetaData( + Collections.singletonList(new RepositoryMetaData(request.name, request.type, request.settings))); } else { boolean found = false; List<RepositoryMetaData> repositoriesMetaData = new ArrayList<>(repositories.repositories().size() + 1); @@ -133,7 +134,7 @@ public ClusterState execute(ClusterState currentState) throws IOException { } else { logger.info("update repository [{}]", request.name); } - repositories = new RepositoriesMetaData(repositoriesMetaData.toArray(new RepositoryMetaData[repositoriesMetaData.size()])); + repositories = new RepositoriesMetaData(repositoriesMetaData); } mdBuilder.putCustom(RepositoriesMetaData.TYPE, repositories); return ClusterState.builder(currentState).metaData(mdBuilder).build(); @@ -185,7 +186,7 @@ public ClusterState execute(ClusterState currentState) { } } if (changed) { - repositories = new RepositoriesMetaData(repositoriesMetaData.toArray(new RepositoryMetaData[repositoriesMetaData.size()])); + repositories = new RepositoriesMetaData(repositoriesMetaData); mdBuilder.putCustom(RepositoriesMetaData.TYPE, repositories); return ClusterState.builder(currentState).metaData(mdBuilder).build(); } diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java 
b/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java index 102bc5a5f0524..7a8d8327d5e3a 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java @@ -230,13 +230,6 @@ public Set<SnapshotId> getSnapshots(final IndexId indexId) { return snapshotIds; } - /** - * Initializes the indices in the repository metadata; returns a new instance. - */ - public RepositoryData initIndices(final Map<IndexId, Set<SnapshotId>> indexSnapshots) { - return new RepositoryData(genId, snapshotIds, snapshotStates, indexSnapshots, incompatibleSnapshotIds); - } - @Override public boolean equals(Object obj) { if (this == obj) { @@ -352,9 +345,10 @@ public XContentBuilder snapshotsToXContent(final XContentBuilder builder, final * Reads an instance of {@link RepositoryData} from x-content, loading the snapshots and indices metadata. */ public static RepositoryData snapshotsFromXContent(final XContentParser parser, long genId) throws IOException { - Map<String, SnapshotId> snapshots = new HashMap<>(); - Map<String, SnapshotState> snapshotStates = new HashMap<>(); - Map<IndexId, Set<SnapshotId>> indexSnapshots = new HashMap<>(); + final Map<String, SnapshotId> snapshots = new HashMap<>(); + final Map<String, SnapshotState> snapshotStates = new HashMap<>(); + final Map<IndexId, Set<SnapshotId>> indexSnapshots = new HashMap<>(); + if (parser.nextToken() == XContentParser.Token.START_OBJECT) { while (parser.nextToken() == XContentParser.Token.FIELD_NAME) { String field = parser.currentName(); @@ -397,17 +391,18 @@ public static RepositoryData snapshotsFromXContent(final XContentParser parser, throw new ElasticsearchParseException("start object expected [indices]"); } while (parser.nextToken() != XContentParser.Token.END_OBJECT) { - String indexName = parser.currentName(); - String indexId = null; - Set<SnapshotId> snapshotIds = new LinkedHashSet<>(); + final String indexName = parser.currentName(); + final Set<SnapshotId> snapshotIds = new LinkedHashSet<>(); + + IndexId indexId = null; if (parser.nextToken() != XContentParser.Token.START_OBJECT) { throw new ElasticsearchParseException("start object expected index[" + indexName + "]"); } while (parser.nextToken() != XContentParser.Token.END_OBJECT) { - String indexMetaFieldName = parser.currentName(); + final String indexMetaFieldName = parser.currentName(); parser.nextToken(); if (INDEX_ID.equals(indexMetaFieldName)) { - indexId = parser.text(); + indexId = new IndexId(indexName, parser.text()); } else if (SNAPSHOTS.equals(indexMetaFieldName)) { if (parser.currentToken() != XContentParser.Token.START_ARRAY) { throw new ElasticsearchParseException("start array expected [snapshots]"); @@ -428,12 +423,22 @@ public static RepositoryData snapshotsFromXContent(final XContentParser parser, // since we already have the name/uuid combo in the snapshots array uuid = parser.text(); } - snapshotIds.add(snapshots.get(uuid)); + + SnapshotId snapshotId = snapshots.get(uuid); + if (snapshotId != null) { + snapshotIds.add(snapshotId); + } else { + // A snapshotted index references a snapshot which does not exist in + // the list of snapshots. This can happen when multiple clusters on + // different versions create or delete snapshots in the same repository. 
+ throw new ElasticsearchParseException("Detected a corrupted repository, index " + indexId + + " references an unknown snapshot uuid [" + uuid + "]"); + } } } } assert indexId != null; - indexSnapshots.put(new IndexId(indexName, indexId), snapshotIds); + indexSnapshots.put(indexId, snapshotIds); } } else { throw new ElasticsearchParseException("unknown field name [" + field + "]"); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java new file mode 100644 index 0000000000000..e6c994a85c35d --- /dev/null +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java @@ -0,0 +1,110 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest.action.admin.indices; + +import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; +import org.elasticsearch.action.admin.indices.shrink.ResizeType; +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; + +import java.io.IOException; + +public abstract class RestResizeHandler extends BaseRestHandler { + + RestResizeHandler(final Settings settings) { + super(settings); + } + + @Override + public abstract String getName(); + + abstract ResizeType getResizeType(); + + @Override + public final RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + final ResizeRequest resizeRequest = new ResizeRequest(request.param("target"), request.param("index")); + resizeRequest.setResizeType(getResizeType()); + final String rawCopySettings = request.param("copy_settings"); + final boolean copySettings; + if (rawCopySettings == null) { + copySettings = resizeRequest.getCopySettings(); + } else { + deprecationLogger.deprecated("parameter [copy_settings] is deprecated but was [" + rawCopySettings + "]"); + if (rawCopySettings.length() == 0) { + copySettings = true; + } else { + copySettings = Booleans.parseBoolean(rawCopySettings); + } + } + resizeRequest.setCopySettings(copySettings); + request.applyContentParser(resizeRequest::fromXContent); + resizeRequest.timeout(request.paramAsTime("timeout", resizeRequest.timeout())); + resizeRequest.masterNodeTimeout(request.paramAsTime("master_timeout", resizeRequest.masterNodeTimeout())); + 
resizeRequest.setWaitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); + return channel -> client.admin().indices().resizeIndex(resizeRequest, new RestToXContentListener<>(channel)); + } + + public static class RestShrinkIndexAction extends RestResizeHandler { + + public RestShrinkIndexAction(final Settings settings, final RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.PUT, "/{index}/_shrink/{target}", this); + controller.registerHandler(RestRequest.Method.POST, "/{index}/_shrink/{target}", this); + } + + @Override + public String getName() { + return "shrink_index_action"; + } + + @Override + protected ResizeType getResizeType() { + return ResizeType.SHRINK; + } + + } + + public static class RestSplitIndexAction extends RestResizeHandler { + + public RestSplitIndexAction(final Settings settings, final RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.PUT, "/{index}/_split/{target}", this); + controller.registerHandler(RestRequest.Method.POST, "/{index}/_split/{target}", this); + } + + @Override + public String getName() { + return "split_index_action"; + } + + @Override + protected ResizeType getResizeType() { + return ResizeType.SPLIT; + } + + } + +} diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestShrinkIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestShrinkIndexAction.java deleted file mode 100644 index be875dd0a55aa..0000000000000 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestShrinkIndexAction.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.rest.action.admin.indices; - -import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; -import org.elasticsearch.action.admin.indices.shrink.ResizeType; -import org.elasticsearch.action.support.ActiveShardCount; -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestToXContentListener; - -import java.io.IOException; - -public class RestShrinkIndexAction extends BaseRestHandler { - public RestShrinkIndexAction(Settings settings, RestController controller) { - super(settings); - controller.registerHandler(RestRequest.Method.PUT, "/{index}/_shrink/{target}", this); - controller.registerHandler(RestRequest.Method.POST, "/{index}/_shrink/{target}", this); - } - - @Override - public String getName() { - return "shrink_index_action"; - } - - @Override - public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - ResizeRequest shrinkIndexRequest = new ResizeRequest(request.param("target"), request.param("index")); - shrinkIndexRequest.setResizeType(ResizeType.SHRINK); - request.applyContentParser(shrinkIndexRequest::fromXContent); - shrinkIndexRequest.timeout(request.paramAsTime("timeout", shrinkIndexRequest.timeout())); - shrinkIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", shrinkIndexRequest.masterNodeTimeout())); - shrinkIndexRequest.setWaitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); - return channel -> client.admin().indices().resizeIndex(shrinkIndexRequest, new RestToXContentListener<>(channel)); - } -} diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSplitIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSplitIndexAction.java deleted file mode 100644 index d465c4ebb749b..0000000000000 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSplitIndexAction.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.rest.action.admin.indices; - -import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; -import org.elasticsearch.action.admin.indices.shrink.ResizeType; -import org.elasticsearch.action.support.ActiveShardCount; -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestToXContentListener; - -import java.io.IOException; - -public class RestSplitIndexAction extends BaseRestHandler { - public RestSplitIndexAction(Settings settings, RestController controller) { - super(settings); - controller.registerHandler(RestRequest.Method.PUT, "/{index}/_split/{target}", this); - controller.registerHandler(RestRequest.Method.POST, "/{index}/_split/{target}", this); - } - - @Override - public String getName() { - return "split_index_action"; - } - - @Override - public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - ResizeRequest shrinkIndexRequest = new ResizeRequest(request.param("target"), request.param("index")); - shrinkIndexRequest.setResizeType(ResizeType.SPLIT); - request.applyContentParser(shrinkIndexRequest::fromXContent); - shrinkIndexRequest.timeout(request.paramAsTime("timeout", shrinkIndexRequest.timeout())); - shrinkIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", shrinkIndexRequest.masterNodeTimeout())); - shrinkIndexRequest.setWaitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); - return channel -> client.admin().indices().resizeIndex(shrinkIndexRequest, new RestToXContentListener<>(channel)); - } -} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java index 66b8f8d5b15ed..25f83caa3eb92 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java @@ -20,10 +20,8 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.util.LongHash; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -103,11 +101,22 @@ public SignificantStringTerms buildAggregation(long owningBucketOrdinal) throws BucketSignificancePriorityQueue ordered = new BucketSignificancePriorityQueue<>(size); SignificantStringTerms.Bucket spare = null; - for (long globalTermOrd = 0; globalTermOrd < valueCount; ++globalTermOrd) { - if (includeExclude != null && !acceptedGlobalOrdinals.get(globalTermOrd)) { + final boolean needsFullScan = bucketOrds == null || bucketCountThresholds.getMinDocCount() == 0; + final long maxId = needsFullScan ? 
valueCount : bucketOrds.size(); + for (long ord = 0; ord < maxId; ord++) { + final long globalOrd; + final long bucketOrd; + if (needsFullScan) { + bucketOrd = bucketOrds == null ? ord : bucketOrds.find(ord); + globalOrd = ord; + } else { + assert bucketOrds != null; + bucketOrd = ord; + globalOrd = bucketOrds.get(ord); + } + if (includeExclude != null && !acceptedGlobalOrdinals.get(globalOrd)) { continue; } - final long bucketOrd = getBucketOrd(globalTermOrd); final int bucketDocCount = bucketOrd < 0 ? 0 : bucketDocCount(bucketOrd); if (bucketCountThresholds.getMinDocCount() > 0 && bucketDocCount == 0) { continue; @@ -120,7 +129,7 @@ public SignificantStringTerms buildAggregation(long owningBucketOrdinal) throws spare = new SignificantStringTerms.Bucket(new BytesRef(), 0, 0, 0, 0, null, format); } spare.bucketOrd = bucketOrd; - copy(lookupGlobalOrd.apply(globalTermOrd), spare.termBytes); + copy(lookupGlobalOrd.apply(globalOrd), spare.termBytes); spare.subsetDf = bucketDocCount; spare.subsetSize = subsetSize; spare.supersetDf = termsAggFactory.getBackgroundFrequency(spare.termBytes); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index 6ad14b8d0f93a..03eb00337e9c1 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -71,7 +71,7 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr protected final long valueCount; protected final GlobalOrdLookupFunction lookupGlobalOrd; - private final LongHash bucketOrds; + protected final LongHash bucketOrds; public interface GlobalOrdLookupFunction { BytesRef apply(long ord) throws IOException; @@ -107,10 +107,6 @@ boolean remapGlobalOrds() { return bucketOrds != null; } - protected final long getBucketOrd(long globalOrd) { - return bucketOrds == null ? globalOrd : bucketOrds.find(globalOrd); - } - private void collectGlobalOrd(int doc, long globalOrd, LeafBucketCollector sub) throws IOException { if (bucketOrds == null) { collectExistingBucket(sub, doc, globalOrd); @@ -188,17 +184,28 @@ public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOE long otherDocCount = 0; BucketPriorityQueue ordered = new BucketPriorityQueue<>(size, order.comparator(this)); OrdBucket spare = new OrdBucket(-1, 0, null, showTermDocCountError, 0); - for (long globalTermOrd = 0; globalTermOrd < valueCount; ++globalTermOrd) { - if (includeExclude != null && !acceptedGlobalOrdinals.get(globalTermOrd)) { + final boolean needsFullScan = bucketOrds == null || bucketCountThresholds.getMinDocCount() == 0; + final long maxId = needsFullScan ? valueCount : bucketOrds.size(); + for (long ord = 0; ord < maxId; ord++) { + final long globalOrd; + final long bucketOrd; + if (needsFullScan) { + bucketOrd = bucketOrds == null ? ord : bucketOrds.find(ord); + globalOrd = ord; + } else { + assert bucketOrds != null; + bucketOrd = ord; + globalOrd = bucketOrds.get(ord); + } + if (includeExclude != null && !acceptedGlobalOrdinals.get(globalOrd)) { continue; } - final long bucketOrd = getBucketOrd(globalTermOrd); final int bucketDocCount = bucketOrd < 0 ? 
0 : bucketDocCount(bucketOrd); if (bucketCountThresholds.getMinDocCount() > 0 && bucketDocCount == 0) { continue; } otherDocCount += bucketDocCount; - spare.globalOrd = globalTermOrd; + spare.globalOrd = globalOrd; spare.bucketOrd = bucketOrd; spare.docCount = bucketDocCount; if (bucketCountThresholds.getShardMinDocCount() <= spare.docCount) { @@ -378,7 +385,7 @@ private void mapSegmentCountsToGlobalCounts(LongUnaryOperator mapping) throws IO } final long ord = i - 1; // remember we do +1 when counting final long globalOrd = mapping.applyAsLong(ord); - long bucketOrd = getBucketOrd(globalOrd); + long bucketOrd = bucketOrds == null ? globalOrd : bucketOrds.find(globalOrd); incrementBucketDocCount(bucketOrd, inc); } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index e6b54a20a1e07..84f058562c1a5 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -371,7 +371,7 @@ private void validateExistingIndex(IndexMetaData currentIndexMetaData, IndexMeta // Make sure that the number of shards is the same. That's the only thing that we cannot change if (currentIndexMetaData.getNumberOfShards() != snapshotIndexMetaData.getNumberOfShards()) { throw new SnapshotRestoreException(snapshot, "cannot restore index [" + renamedIndex + "] with [" + currentIndexMetaData.getNumberOfShards() + - "] shard from snapshot with [" + snapshotIndexMetaData.getNumberOfShards() + "] shards"); + "] shards from a snapshot of index [" + snapshotIndexMetaData.getIndex().getName() + "] with [" + snapshotIndexMetaData.getNumberOfShards() + "] shards"); } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 7ab1ba43158ad..f13e238dd0eec 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -592,11 +592,12 @@ public List currentSnapshots(final String repository, * @return map of shard id to snapshot status */ public Map<ShardId, IndexShardSnapshotStatus> snapshotShards(final String repositoryName, + final RepositoryData repositoryData, final SnapshotInfo snapshotInfo) throws IOException { - Map<ShardId, IndexShardSnapshotStatus> shardStatus = new HashMap<>(); - Repository repository = repositoriesService.repository(repositoryName); - RepositoryData repositoryData = repository.getRepositoryData(); - MetaData metaData = repository.getSnapshotMetaData(snapshotInfo, repositoryData.resolveIndices(snapshotInfo.indices())); + final Repository repository = repositoriesService.repository(repositoryName); + final Map<ShardId, IndexShardSnapshotStatus> shardStatus = new HashMap<>(); + final MetaData metaData = repository.getSnapshotMetaData(snapshotInfo, repositoryData.resolveIndices(snapshotInfo.indices())); + for (String index : snapshotInfo.indices()) { IndexId indexId = repositoryData.resolveIndexId(index); IndexMetaData indexMetaData = metaData.indices().get(index); diff --git a/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java b/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java index d3560fc6db355..6e4c97fd3dad2 100.644 --- a/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java +++ b/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java @@ -41,6 +41,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import
org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentLocation; +import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.discovery.DiscoverySettings; @@ -78,6 +79,7 @@ import static org.hamcrest.CoreMatchers.hasItems; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.startsWith; public class ElasticsearchExceptionTests extends ESTestCase { @@ -124,13 +126,13 @@ public void testGuessRootCause() { } else { rootCauses = ElasticsearchException.guessRootCauses(randomBoolean() ? new RemoteTransportException("remoteboom", ex) : ex); } - assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "parsing_exception"); - assertEquals(rootCauses[0].getMessage(), "foobar"); + assertEquals("parsing_exception", ElasticsearchException.getExceptionName(rootCauses[0])); + assertEquals("foobar", rootCauses[0].getMessage()); ElasticsearchException oneLevel = new ElasticsearchException("foo", new RuntimeException("foobar")); rootCauses = oneLevel.guessRootCauses(); - assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "exception"); - assertEquals(rootCauses[0].getMessage(), "foo"); + assertEquals("exception", ElasticsearchException.getExceptionName(rootCauses[0])); + assertEquals("foo", rootCauses[0].getMessage()); } { ShardSearchFailure failure = new ShardSearchFailure( @@ -146,20 +148,40 @@ public void testGuessRootCause() { assertEquals(rootCauses.length, 2); assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "parsing_exception"); assertEquals(rootCauses[0].getMessage(), "foobar"); - assertEquals(((ParsingException) rootCauses[0]).getLineNumber(), 1); - assertEquals(((ParsingException) rootCauses[0]).getColumnNumber(), 2); - assertEquals(ElasticsearchException.getExceptionName(rootCauses[1]), "query_shard_exception"); - assertEquals((rootCauses[1]).getIndex().getName(), "foo1"); - assertEquals(rootCauses[1].getMessage(), "foobar"); + assertEquals(1, ((ParsingException) rootCauses[0]).getLineNumber()); + assertEquals(2, ((ParsingException) rootCauses[0]).getColumnNumber()); + assertEquals("query_shard_exception", ElasticsearchException.getExceptionName(rootCauses[1])); + assertEquals("foo1", rootCauses[1].getIndex().getName()); + assertEquals("foobar", rootCauses[1].getMessage()); } { final ElasticsearchException[] foobars = ElasticsearchException.guessRootCauses(new IllegalArgumentException("foobar")); assertEquals(foobars.length, 1); - assertTrue(foobars[0] instanceof ElasticsearchException); - assertEquals(foobars[0].getMessage(), "foobar"); - assertEquals(foobars[0].getCause().getClass(), IllegalArgumentException.class); - assertEquals(foobars[0].getExceptionName(), "illegal_argument_exception"); + assertThat(foobars[0], instanceOf(ElasticsearchException.class)); + assertEquals("foobar", foobars[0].getMessage()); + assertEquals(IllegalArgumentException.class, foobars[0].getCause().getClass()); + assertEquals("illegal_argument_exception", foobars[0].getExceptionName()); + } + + { + XContentParseException inner = new XContentParseException(null, "inner"); + XContentParseException outer = new XContentParseException(null, "outer", inner); + final ElasticsearchException[] causes = ElasticsearchException.guessRootCauses(outer); + 
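// the innermost XContentParseException should surface as the root cause, not the outer wrapper +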
assertEquals(causes.length, 1); + assertThat(causes[0], instanceOf(ElasticsearchException.class)); + assertEquals("inner", causes[0].getMessage()); + assertEquals("x_content_parse_exception", causes[0].getExceptionName()); + } + + { + ElasticsearchException inner = new ElasticsearchException("inner"); + XContentParseException outer = new XContentParseException(null, "outer", inner); + final ElasticsearchException[] causes = ElasticsearchException.guessRootCauses(outer); + assertEquals(causes.length, 1); + assertThat(causes[0], instanceOf(ElasticsearchException.class)); + assertEquals("inner", causes[0].getMessage()); + assertEquals("exception", causes[0].getExceptionName()); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java index 4ced505717a2e..7d671096514f4 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java @@ -69,6 +69,7 @@ public void testToXContent() throws IOException { assertEquals("{\n" + " \"acknowledged\" : true,\n" + " \"state\" : {\n" + + " \"cluster_uuid\" : \"_na_\",\n" + " \"version\" : 0,\n" + " \"state_uuid\" : \"" + clusterState.stateUUID() + "\",\n" + " \"master_node\" : \"node0\",\n" + @@ -136,6 +137,7 @@ public void testToXContent() throws IOException { assertEquals("{\n" + " \"acknowledged\" : true,\n" + " \"state\" : {\n" + + " \"cluster_uuid\" : \"_na_\",\n" + " \"version\" : 0,\n" + " \"state_uuid\" : \"" + clusterState.stateUUID() + "\",\n" + " \"master_node\" : \"node0\"\n" + @@ -168,6 +170,7 @@ public void testToXContent() throws IOException { assertEquals("{\n" + " \"acknowledged\" : true,\n" + " \"state\" : {\n" + + " \"cluster_uuid\" : \"_na_\",\n" + " \"metadata\" : {\n" + " \"cluster_uuid\" : \"_na_\",\n" + " \"templates\" : { },\n" + diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java index dd76564ca3282..f4a72dccdcc73 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java @@ -82,7 +82,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { private IndexMetaData indexMetaData() throws IOException { return IndexMetaData.builder("index") - .putMapping("type", + .putMapping("_doc", "{\"properties\":{\"foo\":{\"type\":\"text\",\"fields\":" + "{\"keyword\":{\"type\":\"keyword\",\"ignore_above\":256}}}}}") .settings(idxSettings) @@ -91,7 +91,7 @@ private IndexMetaData indexMetaData() throws IOException { public void testShouldExecuteReplicaItem() throws Exception { // Successful index request should be replicated - DocWriteRequest writeRequest = new IndexRequest("index", "type", "id") + DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id") .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar"); DocWriteResponse response = new IndexResponse(shardId, "type", "id", 1, 17, 1, randomBoolean()); BulkItemRequest request = new BulkItemRequest(0, writeRequest); @@ -100,7 +100,7 @@ public void testShouldExecuteReplicaItem() throws Exception { equalTo(ReplicaItemExecutionMode.NORMAL)); // Failed index requests without sequence no should not 
be replicated - writeRequest = new IndexRequest("index", "type", "id") + writeRequest = new IndexRequest("index", "_doc", "id") .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar"); request = new BulkItemRequest(0, writeRequest); request.setPrimaryResponse( @@ -137,9 +137,7 @@ public void testExecuteBulkIndexRequest() throws Exception { BulkItemRequest[] items = new BulkItemRequest[1]; boolean create = randomBoolean(); - DocWriteRequest writeRequest = new IndexRequest("index", "type", "id") - .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar") - .create(create); + DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id").source(Requests.INDEX_CONTENT_TYPE).create(create); BulkItemRequest primaryRequest = new BulkItemRequest(0, writeRequest); items[0] = primaryRequest; BulkShardRequest bulkShardRequest = @@ -166,9 +164,7 @@ public void testExecuteBulkIndexRequest() throws Exception { // Assert that the document actually made it there assertDocCount(shard, 1); - writeRequest = new IndexRequest("index", "type", "id") - .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar") - .create(true); + writeRequest = new IndexRequest("index", "_doc", "id").source(Requests.INDEX_CONTENT_TYPE).create(true); primaryRequest = new BulkItemRequest(0, writeRequest); items[0] = primaryRequest; bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); @@ -176,7 +172,7 @@ public void testExecuteBulkIndexRequest() throws Exception { Translog.Location secondLocation = TransportShardBulkAction.executeBulkItemRequest( metaData, shard, bulkShardRequest, newLocation, 0, updateHelper, - threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer()); + threadPool::absoluteTimeInMillis, new ThrowingMappingUpdatePerformer(new RuntimeException("fail"))); // Translog should not change, since the document was not indexed due to a version conflict assertThat(secondLocation, equalTo(newLocation)); @@ -193,7 +189,7 @@ public void testExecuteBulkIndexRequest() throws Exception { BulkItemResponse.Failure failure = primaryResponse.getFailure(); assertThat(failure.getIndex(), equalTo("index")); - assertThat(failure.getType(), equalTo("type")); + assertThat(failure.getType(), equalTo("_doc")); assertThat(failure.getId(), equalTo("id")); assertThat(failure.getCause().getClass(), equalTo(VersionConflictEngineException.class)); assertThat(failure.getCause().getMessage(), @@ -212,8 +208,8 @@ public void testSkipBulkIndexRequestIfAborted() throws Exception { BulkItemRequest[] items = new BulkItemRequest[randomIntBetween(2, 5)]; for (int i = 0; i < items.length; i++) { - DocWriteRequest writeRequest = new IndexRequest("index", "type", "id_" + i) - .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar-" + i) + DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id_" + i) + .source(Requests.INDEX_CONTENT_TYPE) .opType(DocWriteRequest.OpType.INDEX); items[i] = new BulkItemRequest(i, writeRequest); } @@ -240,7 +236,7 @@ public void testSkipBulkIndexRequestIfAborted() throws Exception { BulkItemResponse response = result.finalResponseIfSuccessful.getResponses()[i]; assertThat(response.getItemId(), equalTo(i)); assertThat(response.getIndex(), equalTo("index")); - assertThat(response.getType(), equalTo("type")); + assertThat(response.getType(), equalTo("_doc")); assertThat(response.getId(), equalTo("id_" + i)); assertThat(response.getOpType(), equalTo(DocWriteRequest.OpType.INDEX)); if (response.getItemId() == rejectItem.id()) { @@ -262,7 +258,7 @@ public void testExecuteBulkIndexRequestWithRejection() 
throws Exception { IndexShard shard = newStartedShard(true); BulkItemRequest[] items = new BulkItemRequest[1]; - DocWriteRequest writeRequest = new IndexRequest("index", "type", "id") + DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id") .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar"); items[0] = new BulkItemRequest(0, writeRequest); BulkShardRequest bulkShardRequest = @@ -272,26 +268,20 @@ public void testExecuteBulkIndexRequestWithRejection() throws Exception { UpdateHelper updateHelper = null; // Pretend the mappings haven't made it to the node yet, and throw a rejection - RuntimeException err = new ReplicationOperation.RetryOnPrimaryException(shardId, "rejection"); - - try { - TransportShardBulkAction.executeBulkItemRequest(metaData, shard, bulkShardRequest, - location, 0, updateHelper, threadPool::absoluteTimeInMillis, - new ThrowingVerifyingMappingUpdatePerformer(err)); - fail("should have thrown a retry exception"); - } catch (ReplicationOperation.RetryOnPrimaryException e) { - assertThat(e, equalTo(err)); - } + expectThrows(ReplicationOperation.RetryOnPrimaryException.class, + () -> TransportShardBulkAction.executeBulkItemRequest(metaData, shard, bulkShardRequest, + location, 0, updateHelper, threadPool::absoluteTimeInMillis, + new NoopMappingUpdatePerformer())); closeShards(shard); } - public void testExecuteBulkIndexRequestWithConflictingMappings() throws Exception { + public void testExecuteBulkIndexRequestWithErrorWhileUpdatingMapping() throws Exception { IndexMetaData metaData = indexMetaData(); IndexShard shard = newStartedShard(true); BulkItemRequest[] items = new BulkItemRequest[1]; - DocWriteRequest writeRequest = new IndexRequest("index", "type", "id") + DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id") .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar"); items[0] = new BulkItemRequest(0, writeRequest); BulkShardRequest bulkShardRequest = @@ -300,8 +290,8 @@ public void testExecuteBulkIndexRequestWithConflictingMappings() throws Exceptio Translog.Location location = new Translog.Location(0, 0, 0); UpdateHelper updateHelper = null; - // Return a mapping conflict (IAE) when trying to update the mapping - RuntimeException err = new IllegalArgumentException("mapping conflict"); + // Return an exception when trying to update the mapping + RuntimeException err = new RuntimeException("some kind of exception"); Translog.Location newLocation = TransportShardBulkAction.executeBulkItemRequest(metaData, shard, bulkShardRequest, location, 0, updateHelper, @@ -318,13 +308,12 @@ public void testExecuteBulkIndexRequestWithConflictingMappings() throws Exceptio assertThat(primaryResponse.getId(), equalTo("id")); assertThat(primaryResponse.getOpType(), equalTo(DocWriteRequest.OpType.INDEX)); assertTrue(primaryResponse.isFailed()); - assertThat(primaryResponse.getFailureMessage(), containsString("mapping conflict")); + assertThat(primaryResponse.getFailureMessage(), containsString("some kind of exception")); BulkItemResponse.Failure failure = primaryResponse.getFailure(); assertThat(failure.getIndex(), equalTo("index")); - assertThat(failure.getType(), equalTo("type")); + assertThat(failure.getType(), equalTo("_doc")); assertThat(failure.getId(), equalTo("id")); assertThat(failure.getCause(), equalTo(err)); - assertThat(failure.getStatus(), equalTo(RestStatus.BAD_REQUEST)); closeShards(shard); } @@ -334,7 +323,7 @@ public void testExecuteBulkDeleteRequest() throws Exception { IndexShard shard = newStartedShard(true); BulkItemRequest[] items = new 
BulkItemRequest[1]; - DocWriteRequest writeRequest = new DeleteRequest("index", "type", "id"); + DocWriteRequest writeRequest = new DeleteRequest("index", "_doc", "id"); items[0] = new BulkItemRequest(0, writeRequest); BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); @@ -366,16 +355,16 @@ public void testExecuteBulkDeleteRequest() throws Exception { assertThat(response.getResult(), equalTo(DocWriteResponse.Result.NOT_FOUND)); assertThat(response.getShardId(), equalTo(shard.shardId())); assertThat(response.getIndex(), equalTo("index")); - assertThat(response.getType(), equalTo("type")); + assertThat(response.getType(), equalTo("_doc")); assertThat(response.getId(), equalTo("id")); assertThat(response.getVersion(), equalTo(1L)); assertThat(response.getSeqNo(), equalTo(0L)); assertThat(response.forcedRefresh(), equalTo(false)); // Now do the same after indexing the document, it should now find and delete the document - indexDoc(shard, "type", "id", "{\"foo\": \"bar\"}"); + indexDoc(shard, "_doc", "id", "{}"); - writeRequest = new DeleteRequest("index", "type", "id"); + writeRequest = new DeleteRequest("index", "_doc", "id"); items[0] = new BulkItemRequest(0, writeRequest); bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); @@ -405,7 +394,7 @@ public void testExecuteBulkDeleteRequest() throws Exception { assertThat(response.getResult(), equalTo(DocWriteResponse.Result.DELETED)); assertThat(response.getShardId(), equalTo(shard.shardId())); assertThat(response.getIndex(), equalTo("index")); - assertThat(response.getType(), equalTo("type")); + assertThat(response.getType(), equalTo("_doc")); assertThat(response.getId(), equalTo("id")); assertThat(response.getVersion(), equalTo(3L)); assertThat(response.getSeqNo(), equalTo(2L)); @@ -416,11 +405,11 @@ public void testExecuteBulkDeleteRequest() throws Exception { } public void testNoopUpdateReplicaRequest() throws Exception { - DocWriteRequest writeRequest = new IndexRequest("index", "type", "id") + DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id") .source(Requests.INDEX_CONTENT_TYPE, "field", "value"); BulkItemRequest replicaRequest = new BulkItemRequest(0, writeRequest); - DocWriteResponse noopUpdateResponse = new UpdateResponse(shardId, "index", "id", 0, + DocWriteResponse noopUpdateResponse = new UpdateResponse(shardId, "_doc", "id", 0, DocWriteResponse.Result.NOOP); BulkItemResultHolder noopResults = new BulkItemResultHolder(noopUpdateResponse, null, replicaRequest); @@ -447,8 +436,7 @@ public void testNoopUpdateReplicaRequest() throws Exception { } public void testUpdateReplicaRequestWithFailure() throws Exception { - DocWriteRequest writeRequest = new IndexRequest("index", "type", "id") - .source(Requests.INDEX_CONTENT_TYPE, "field", "value"); + DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id").source(Requests.INDEX_CONTENT_TYPE); BulkItemRequest replicaRequest = new BulkItemRequest(0, writeRequest); Exception err = new ElasticsearchException("I'm dead <(x.x)>"); @@ -477,18 +465,17 @@ public void testUpdateReplicaRequestWithFailure() throws Exception { assertThat(primaryResponse.getFailureMessage(), containsString("I'm dead <(x.x)>")); BulkItemResponse.Failure failure = primaryResponse.getFailure(); assertThat(failure.getIndex(), equalTo("index")); - assertThat(failure.getType(), equalTo("type")); + assertThat(failure.getType(), equalTo("_doc")); assertThat(failure.getId(), equalTo("id")); assertThat(failure.getCause(), 
equalTo(err)); assertThat(failure.getStatus(), equalTo(RestStatus.INTERNAL_SERVER_ERROR)); } public void testUpdateReplicaRequestWithConflictFailure() throws Exception { - DocWriteRequest writeRequest = new IndexRequest("index", "type", "id") - .source(Requests.INDEX_CONTENT_TYPE, "field", "value"); + DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id").source(Requests.INDEX_CONTENT_TYPE); BulkItemRequest replicaRequest = new BulkItemRequest(0, writeRequest); - Exception err = new VersionConflictEngineException(shardId, "type", "id", + Exception err = new VersionConflictEngineException(shardId, "_doc", "id", "I'm conflicted <(;_;)>"); Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0); BulkItemResultHolder failedResults = new BulkItemResultHolder(null, indexResult, @@ -515,21 +502,21 @@ public void testUpdateReplicaRequestWithConflictFailure() throws Exception { assertThat(primaryResponse.getFailureMessage(), containsString("I'm conflicted <(;_;)>")); BulkItemResponse.Failure failure = primaryResponse.getFailure(); assertThat(failure.getIndex(), equalTo("index")); - assertThat(failure.getType(), equalTo("type")); + assertThat(failure.getType(), equalTo("_doc")); assertThat(failure.getId(), equalTo("id")); assertThat(failure.getCause(), equalTo(err)); assertThat(failure.getStatus(), equalTo(RestStatus.CONFLICT)); } public void testUpdateReplicaRequestWithSuccess() throws Exception { - DocWriteRequest writeRequest = new IndexRequest("index", "type", "id") - .source(Requests.INDEX_CONTENT_TYPE, "field", "value"); + DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id") + .source(Requests.INDEX_CONTENT_TYPE); BulkItemRequest replicaRequest = new BulkItemRequest(0, writeRequest); boolean created = randomBoolean(); Translog.Location resultLocation = new Translog.Location(42, 42, 42); Engine.IndexResult indexResult = new FakeResult(1, 1, created, resultLocation); - DocWriteResponse indexResponse = new IndexResponse(shardId, "index", "id", 1, 17, 1, created); + DocWriteResponse indexResponse = new IndexResponse(shardId, "_doc", "id", 1, 17, 1, created); BulkItemResultHolder goodResults = new BulkItemResultHolder(indexResponse, indexResult, replicaRequest); @@ -558,8 +545,8 @@ public void testUpdateReplicaRequestWithSuccess() throws Exception { public void testCalculateTranslogLocation() throws Exception { final Translog.Location original = new Translog.Location(0, 0, 0); - DocWriteRequest writeRequest = new IndexRequest("index", "type", "id") - .source(Requests.INDEX_CONTENT_TYPE, "field", "value"); + DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id") + .source(Requests.INDEX_CONTENT_TYPE); BulkItemRequest replicaRequest = new BulkItemRequest(0, writeRequest); BulkItemResultHolder results = new BulkItemResultHolder(null, null, replicaRequest); @@ -567,7 +554,7 @@ public void testCalculateTranslogLocation() throws Exception { equalTo(original)); boolean created = randomBoolean(); - DocWriteResponse indexResponse = new IndexResponse(shardId, "index", "id", 1, 17, 1, created); + DocWriteResponse indexResponse = new IndexResponse(shardId, "_doc", "id", 1, 17, 1, created); Translog.Location newLocation = new Translog.Location(1, 1, 1); final long version = randomNonNegativeLong(); final long seqNo = randomNonNegativeLong(); @@ -580,10 +567,7 @@ public void testCalculateTranslogLocation() throws Exception { public void testNoOpReplicationOnPrimaryDocumentFailure() throws Exception { final IndexShard shard = 
spy(newStartedShard(false)); - BulkItemRequest itemRequest = new BulkItemRequest(0, - new IndexRequest("index", "type") - .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar") - ); + BulkItemRequest itemRequest = new BulkItemRequest(0, new IndexRequest("index", "_doc").source(Requests.INDEX_CONTENT_TYPE)); final String failureMessage = "simulated primary failure"; final IOException exception = new IOException(failureMessage); itemRequest.setPrimaryResponse(new BulkItemResponse(0, @@ -592,7 +576,7 @@ public void testNoOpReplicationOnPrimaryDocumentFailure() throws Exception { DocWriteRequest.OpType.DELETE, DocWriteRequest.OpType.INDEX ), - new BulkItemResponse.Failure("index", "type", "1", + new BulkItemResponse.Failure("index", "_doc", "1", exception, 1L) )); BulkItemRequest[] itemRequests = new BulkItemRequest[1]; @@ -609,33 +593,23 @@ public void testMappingUpdateParsesCorrectNumberOfTimes() throws Exception { logger.info("--> metadata.getIndex(): {}", metaData.getIndex()); final IndexShard shard = spy(newStartedShard(true)); - IndexRequest request = new IndexRequest("index", "type", "id") + IndexRequest request = new IndexRequest("index", "_doc", "id") .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar"); final AtomicInteger updateCalled = new AtomicInteger(0); - final AtomicInteger verifyCalled = new AtomicInteger(0); - TransportShardBulkAction.executeIndexRequestOnPrimary(request, shard, - new MappingUpdatePerformer() { - @Override - public void updateMappings(Mapping update, ShardId shardId, String type) { - // There should indeed be a mapping update - assertNotNull(update); - updateCalled.incrementAndGet(); - } - - @Override - public void verifyMappings(Mapping update, ShardId shardId) { - // No-op, will be called - logger.info("--> verifying mappings noop"); - verifyCalled.incrementAndGet(); - } - }); + expectThrows(ReplicationOperation.RetryOnPrimaryException.class, + () -> TransportShardBulkAction.executeIndexRequestOnPrimary(request, shard, + (update, shardId, type) -> { + // There should indeed be a mapping update + assertNotNull(update); + updateCalled.incrementAndGet(); + })); assertThat("mappings were \"updated\" once", updateCalled.get(), equalTo(1)); - assertThat("mappings were \"verified\" once", verifyCalled.get(), equalTo(1)); + // Verify that the shard "executed" the operation twice - verify(shard, times(2)).applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyBoolean(), any()); + verify(shard, times(2)).applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyBoolean()); // Update the mapping, so the next mapping updater doesn't do anything final MapperService mapperService = shard.mapperService(); @@ -643,21 +617,11 @@ public void verifyMappings(Mapping update, ShardId shardId) { mapperService.updateMapping(metaData); TransportShardBulkAction.executeIndexRequestOnPrimary(request, shard, - new MappingUpdatePerformer() { - @Override - public void updateMappings(Mapping update, ShardId shardId, String type) { - fail("should not have had to update the mappings"); - } - - @Override - public void verifyMappings(Mapping update, ShardId shardId) { - fail("should not have had to update the mappings"); - } - }); + (update, shardId, type) -> fail("should not have had to update the mappings")); // Verify that the shard "executed" the operation only once (2 for previous invocations plus // 1 for this execution) - verify(shard, times(3)).applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyBoolean(), any()); + verify(shard, 
times(3)).applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyBoolean()); closeShards(shard); } @@ -678,16 +642,16 @@ public Translog.Location getTranslogLocation() { public void testProcessUpdateResponse() throws Exception { IndexShard shard = newStartedShard(false); - UpdateRequest updateRequest = new UpdateRequest("index", "type", "id"); + UpdateRequest updateRequest = new UpdateRequest("index", "_doc", "id"); BulkItemRequest request = new BulkItemRequest(0, updateRequest); - Exception err = new VersionConflictEngineException(shardId, "type", "id", + Exception err = new VersionConflictEngineException(shardId, "_doc", "id", "I'm conflicted <(;_;)>"); Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0); Engine.DeleteResult deleteResult = new Engine.DeleteResult(1, 1, true); DocWriteResponse.Result docWriteResult = DocWriteResponse.Result.CREATED; DocWriteResponse.Result deleteWriteResult = DocWriteResponse.Result.DELETED; - IndexRequest indexRequest = new IndexRequest("index", "type", "id"); - DeleteRequest deleteRequest = new DeleteRequest("index", "type", "id"); + IndexRequest indexRequest = new IndexRequest("index", "_doc", "id"); + DeleteRequest deleteRequest = new DeleteRequest("index", "_doc", "id"); UpdateHelper.Result translate = new UpdateHelper.Result(indexRequest, docWriteResult, new HashMap(), XContentType.JSON); UpdateHelper.Result translateDelete = new UpdateHelper.Result(deleteRequest, deleteWriteResult, @@ -733,30 +697,28 @@ public void testExecuteUpdateRequestOnce() throws Exception { IndexShard shard = newStartedShard(true); Map source = new HashMap<>(); - source.put("foo", "bar"); BulkItemRequest[] items = new BulkItemRequest[1]; boolean create = randomBoolean(); - DocWriteRequest writeRequest = new IndexRequest("index", "type", "id") - .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar") - .create(create); + DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id").source(Requests.INDEX_CONTENT_TYPE).create(create); BulkItemRequest primaryRequest = new BulkItemRequest(0, writeRequest); items[0] = primaryRequest; BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); Translog.Location location = new Translog.Location(0, 0, 0); - IndexRequest indexRequest = new IndexRequest("index", "type", "id"); + IndexRequest indexRequest = new IndexRequest("index", "_doc", "id"); indexRequest.source(source); DocWriteResponse.Result docWriteResult = DocWriteResponse.Result.CREATED; UpdateHelper.Result translate = new UpdateHelper.Result(indexRequest, docWriteResult, new HashMap(), XContentType.JSON); UpdateHelper updateHelper = new MockUpdateHelper(translate); - UpdateRequest updateRequest = new UpdateRequest("index", "type", "id"); + UpdateRequest updateRequest = new UpdateRequest("index", "_doc", "id"); updateRequest.upsert(source); BulkItemResultHolder holder = TransportShardBulkAction.executeUpdateRequestOnce(updateRequest, shard, metaData, - "index", updateHelper, threadPool::absoluteTimeInMillis, primaryRequest, 0, new NoopMappingUpdatePerformer()); + "index", updateHelper, threadPool::absoluteTimeInMillis, primaryRequest, 0, + new ThrowingMappingUpdatePerformer(new RuntimeException())); assertFalse(holder.isVersionConflict()); assertNotNull(holder.response); @@ -785,7 +747,7 @@ public void testExecuteUpdateRequestOnceWithFailure() throws Exception { source.put("foo", "bar"); BulkItemRequest[] items = new BulkItemRequest[1]; boolean create = randomBoolean(); - DocWriteRequest writeRequest = new 
IndexRequest("index", "type", "id") + DocWriteRequest writeRequest = new IndexRequest("index", "_doc", "id") .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar") .create(create); BulkItemRequest primaryRequest = new BulkItemRequest(0, writeRequest); @@ -794,13 +756,13 @@ public void testExecuteUpdateRequestOnceWithFailure() throws Exception { new BulkShardRequest(shardId, RefreshPolicy.NONE, items); Translog.Location location = new Translog.Location(0, 0, 0); - IndexRequest indexRequest = new IndexRequest("index", "type", "id"); + IndexRequest indexRequest = new IndexRequest("index", "_doc", "id"); indexRequest.source(source); DocWriteResponse.Result docWriteResult = DocWriteResponse.Result.CREATED; Exception prepareFailure = new IllegalArgumentException("I failed to do something!"); UpdateHelper updateHelper = new FailingUpdateHelper(prepareFailure); - UpdateRequest updateRequest = new UpdateRequest("index", "type", "id"); + UpdateRequest updateRequest = new UpdateRequest("index", "_doc", "id"); updateRequest.upsert(source); BulkItemResultHolder holder = TransportShardBulkAction.executeUpdateRequestOnce(updateRequest, shard, metaData, @@ -812,7 +774,7 @@ public void testExecuteUpdateRequestOnceWithFailure() throws Exception { assertNotNull(holder.replicaRequest); Engine.IndexResult opResult = (Engine.IndexResult) holder.operationResult; - assertTrue(opResult.hasFailure()); + assertThat(opResult.getResultType(), equalTo(Engine.Result.Type.FAILURE)); assertFalse(opResult.isCreated()); Exception e = opResult.getFailure(); assertThat(e.getMessage(), containsString("I failed to do something!")); @@ -822,7 +784,7 @@ public void testExecuteUpdateRequestOnceWithFailure() throws Exception { assertThat(replicaBulkRequest.request(), instanceOf(IndexRequest.class)); IndexRequest replicaRequest = (IndexRequest) replicaBulkRequest.request(); assertThat(replicaRequest.index(), equalTo("index")); - assertThat(replicaRequest.type(), equalTo("type")); + assertThat(replicaRequest.type(), equalTo("_doc")); assertThat(replicaRequest.id(), equalTo("id")); assertThat(replicaRequest.sourceAsMap(), equalTo(source)); @@ -889,9 +851,6 @@ public Translog.Location getTranslogLocation() { public static class NoopMappingUpdatePerformer implements MappingUpdatePerformer { public void updateMappings(Mapping update, ShardId shardId, String type) { } - - public void verifyMappings(Mapping update, ShardId shardId) { - } } /** Always throw the given exception */ @@ -904,24 +863,5 @@ private class ThrowingMappingUpdatePerformer implements MappingUpdatePerformer { public void updateMappings(Mapping update, ShardId shardId, String type) { throw e; } - - public void verifyMappings(Mapping update, ShardId shardId) { - fail("should not have gotten to this point"); - } - } - - /** Always throw the given exception */ - private class ThrowingVerifyingMappingUpdatePerformer implements MappingUpdatePerformer { - private final RuntimeException e; - ThrowingVerifyingMappingUpdatePerformer(RuntimeException e) { - this.e = e; - } - - public void updateMappings(Mapping update, ShardId shardId, String type) { - } - - public void verifyMappings(Mapping update, ShardId shardId) { - throw e; - } } } diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/server/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java index 29e3080bfe1ca..a5d865a274140 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java 
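The TransportShardBulkActionTests rewrite above leans on the fact that, once verifyMappings is deleted, MappingUpdatePerformer has exactly one abstract method and therefore accepts lambdas such as (update, shardId, type) -> fail(...). A minimal sketch of that shape; Mapping and ShardId are placeholder classes here, standing in for the Elasticsearch types of the same name:

    class Mapping { }  // placeholder for org.elasticsearch.index.mapper.Mapping
    class ShardId { }  // placeholder for org.elasticsearch.index.shard.ShardId

    @FunctionalInterface
    interface MappingUpdatePerformer {
        // the single remaining operation after verifyMappings was removed
        void updateMappings(Mapping update, ShardId shardId, String type);
    }

    class LambdaDemo {
        public static void main(String[] args) {
            // what used to be an anonymous class collapses to a lambda:
            MappingUpdatePerformer noop = (update, shardId, type) -> { /* nothing to update */ };
            noop.updateMappings(new Mapping(), new ShardId(), "_doc");
        }
    }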
@@ -640,7 +640,7 @@ public MetaData.Builder remove(MetaData.Builder builder, String name) { @Override public MetaData.Custom randomCreate(String name) { if (randomBoolean()) { - return new RepositoriesMetaData(); + return new RepositoriesMetaData(Collections.emptyList()); } else { return IndexGraveyardTests.createRandom(); } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java index 129b347889504..5accb3aba3ca4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -389,8 +390,7 @@ private ClusterState executeTask() throws Exception { setupRequest(); final MetaDataCreateIndexService.IndexCreationTask task = new MetaDataCreateIndexService.IndexCreationTask( logger, allocationService, request, listener, indicesService, aliasValidator, xContentRegistry, clusterStateSettings.build(), - validator - ); + validator, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS); return task.execute(state); } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java index 1f4ed5eda78c4..b232c8132ae64 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.metadata; +import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.cluster.ClusterName; @@ -34,24 +35,29 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.gateway.TestGatewayAllocator; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashSet; import java.util.List; +import java.util.Set; +import java.util.function.Consumer; +import java.util.stream.Collectors; +import java.util.stream.Stream; import static java.util.Collections.emptyMap; -import static java.util.Collections.min; import static org.hamcrest.Matchers.endsWith; +import static org.hamcrest.Matchers.equalTo; public class MetaDataCreateIndexServiceTests extends ESTestCase { @@ -228,47 +234,146 @@ public 
void testValidateSplitIndex() { Settings.builder().put("index.number_of_shards", targetShards).build()); } - public void testResizeIndexSettings() { - String indexName = randomAlphaOfLength(10); - List<Version> versions = Arrays.asList(VersionUtils.randomVersion(random()), VersionUtils.randomVersion(random()), - VersionUtils.randomVersion(random())); + public void testPrepareResizeIndexSettings() { + final List<Version> versions = Arrays.asList(VersionUtils.randomVersion(random()), VersionUtils.randomVersion(random())); versions.sort(Comparator.comparingLong(l -> l.id)); - Version version = versions.get(0); - Version minCompat = versions.get(1); - Version upgraded = versions.get(2); - // create one that won't fail - ClusterState clusterState = ClusterState.builder(createClusterState(indexName, randomIntBetween(2, 10), 0, - Settings.builder() + final Version version = versions.get(0); + final Version upgraded = versions.get(1); + final Settings indexSettings = + Settings.builder() + .put("index.version.created", version) + .put("index.version.upgraded", upgraded) + .put("index.similarity.default.type", "BM25") + .put("index.analysis.analyzer.default.tokenizer", "keyword") + .build(); + runPrepareResizeIndexSettingsTest( + indexSettings, + Settings.EMPTY, + Collections.emptyList(), + randomBoolean(), + settings -> { + assertThat("similarity settings must be copied", settings.get("index.similarity.default.type"), equalTo("BM25")); + assertThat( + "analysis settings must be copied", + settings.get("index.analysis.analyzer.default.tokenizer"), + equalTo("keyword")); + assertThat(settings.get("index.routing.allocation.initial_recovery._id"), equalTo("node1")); + assertThat(settings.get("index.allocation.max_retries"), equalTo("1")); + assertThat(settings.getAsVersion("index.version.created", null), equalTo(version)); + assertThat(settings.getAsVersion("index.version.upgraded", null), equalTo(upgraded)); + }); + } + + public void testPrepareResizeIndexSettingsCopySettings() { + final int maxMergeCount = randomIntBetween(1, 16); + final int maxThreadCount = randomIntBetween(1, 16); + final Setting<String> nonCopyableExistingIndexSetting = + Setting.simpleString("index.non_copyable.existing", Setting.Property.IndexScope, Setting.Property.NotCopyableOnResize); + final Setting<String> nonCopyableRequestIndexSetting = + Setting.simpleString("index.non_copyable.request", Setting.Property.IndexScope, Setting.Property.NotCopyableOnResize); + runPrepareResizeIndexSettingsTest( + Settings.builder() + .put("index.merge.scheduler.max_merge_count", maxMergeCount) + .put("index.non_copyable.existing", "existing") + .build(), + Settings.builder() + .put("index.blocks.write", (String) null) + .put("index.merge.scheduler.max_thread_count", maxThreadCount) + .put("index.non_copyable.request", "request") + .build(), + Arrays.asList(nonCopyableExistingIndexSetting, nonCopyableRequestIndexSetting), + true, + settings -> { + assertNull(settings.getAsBoolean("index.blocks.write", null)); + assertThat(settings.get("index.routing.allocation.require._name"), equalTo("node1")); + assertThat(settings.getAsInt("index.merge.scheduler.max_merge_count", null), equalTo(maxMergeCount)); + assertThat(settings.getAsInt("index.merge.scheduler.max_thread_count", null), equalTo(maxThreadCount)); + assertNull(settings.get("index.non_copyable.existing")); + assertThat(settings.get("index.non_copyable.request"), equalTo("request")); + }); + } + + public void testPrepareResizeIndexSettingsAnalysisSettings() { + // analysis settings from the request are not overwritten + 
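// (i.e. a value supplied on the resize request must survive prepareResizeIndexSettings, which otherwise copies analysis settings from the source index) +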
runPrepareResizeIndexSettingsTest( + Settings.EMPTY, + Settings.builder().put("index.analysis.analyzer.default.tokenizer", "whitespace").build(), + Collections.emptyList(), + randomBoolean(), + settings -> + assertThat( + "analysis settings are not overwritten", + settings.get("index.analysis.analyzer.default.tokenizer"), + equalTo("whitespace")) + ); + + } + + public void testPrepareResizeIndexSettingsSimilaritySettings() { + // similarity settings from the request are not overwritten + runPrepareResizeIndexSettingsTest( + Settings.EMPTY, + Settings.builder().put("index.similarity.sim.type", "DFR").build(), + Collections.emptyList(), + randomBoolean(), + settings -> + assertThat("similarity settings are not overwritten", settings.get("index.similarity.sim.type"), equalTo("DFR"))); + + } + + private void runPrepareResizeIndexSettingsTest( + final Settings sourceSettings, + final Settings requestSettings, + final Collection<Setting<?>> additionalIndexScopedSettings, + final boolean copySettings, + final Consumer<Settings> consumer) { + final String indexName = randomAlphaOfLength(10); + + final Settings indexSettings = Settings.builder() .put("index.blocks.write", true) - .put("index.similarity.default.type", "BM25") - .put("index.version.created", version) - .put("index.version.upgraded", upgraded) - .put("index.version.minimum_compatible", minCompat.luceneVersion.toString()) - .put("index.analysis.analyzer.my_analyzer.tokenizer", "keyword") - .build())).nodes(DiscoveryNodes.builder().add(newNode("node1"))) - .build(); - AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY, - Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))), - new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE); + .put("index.routing.allocation.require._name", "node1") + .put(sourceSettings) + .build(); + + final ClusterState initialClusterState = + ClusterState + .builder(createClusterState(indexName, randomIntBetween(2, 10), 0, indexSettings)) + .nodes(DiscoveryNodes.builder().add(newNode("node1"))) + .build(); + + final AllocationService service = new AllocationService( + Settings.builder().build(), + new AllocationDeciders(Settings.EMPTY, + Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))), + new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), + EmptyClusterInfoService.INSTANCE); + + final RoutingTable initialRoutingTable = service.reroute(initialClusterState, "reroute").routingTable(); + final ClusterState routingTableClusterState = ClusterState.builder(initialClusterState).routingTable(initialRoutingTable).build(); - RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable(); - clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); // now we start the shard - routingTable = service.applyStartedShards(clusterState, - routingTable.index(indexName).shardsWithState(ShardRoutingState.INITIALIZING)).routingTable(); - clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); - - Settings.Builder builder = Settings.builder(); - builder.put("index.number_of_shards", 1); - MetaDataCreateIndexService.prepareResizeIndexSettings(clusterState, Collections.emptySet(), builder, - clusterState.metaData().index(indexName).getIndex(), "target", ResizeType.SHRINK); - assertEquals("similarity settings must be copied", "BM25", builder.build().get("index.similarity.default.type")); - 
assertEquals("analysis settings must be copied", - "keyword", builder.build().get("index.analysis.analyzer.my_analyzer.tokenizer")); - assertEquals("node1", builder.build().get("index.routing.allocation.initial_recovery._id")); - assertEquals("1", builder.build().get("index.allocation.max_retries")); - assertEquals(version, builder.build().getAsVersion("index.version.created", null)); - assertEquals(upgraded, builder.build().getAsVersion("index.version.upgraded", null)); + final RoutingTable routingTable = service.applyStartedShards( + routingTableClusterState, + initialRoutingTable.index(indexName).shardsWithState(ShardRoutingState.INITIALIZING)).routingTable(); + final ClusterState clusterState = ClusterState.builder(routingTableClusterState).routingTable(routingTable).build(); + + final Settings.Builder indexSettingsBuilder = Settings.builder().put("index.number_of_shards", 1).put(requestSettings); + final Set> settingsSet = + Stream.concat( + IndexScopedSettings.BUILT_IN_INDEX_SETTINGS.stream(), + additionalIndexScopedSettings.stream()) + .collect(Collectors.toSet()); + MetaDataCreateIndexService.prepareResizeIndexSettings( + clusterState, + Collections.emptySet(), + indexSettingsBuilder, + clusterState.metaData().index(indexName).getIndex(), + "target", + ResizeType.SHRINK, + copySettings, + new IndexScopedSettings(Settings.EMPTY, settingsSet)); + consumer.accept(indexSettingsBuilder.build()); } private DiscoveryNode newNode(String nodeId) { diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java index 187c0e21b4d42..1ab92526e3130 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -722,12 +722,19 @@ public void testRejectNullProperties() { assertThat(ex.getMessage(), containsString("properties cannot be null for setting")); } - public void testRejectConflictProperties() { + public void testRejectConflictingDynamicAndFinalProperties() { IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> Setting.simpleString("foo.bar", Property.Final, Property.Dynamic)); assertThat(ex.getMessage(), containsString("final setting [foo.bar] cannot be dynamic")); } + public void testRejectNonIndexScopedNotCopyableOnResizeSetting() { + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> Setting.simpleString("foo.bar", Property.NotCopyableOnResize)); + assertThat(e, hasToString(containsString("non-index-scoped setting [foo.bar] can not have property [NotCopyableOnResize]"))); + } + public void testTimeValue() { final TimeValue random = TimeValue.parseTimeValue(randomTimeValue(), "test"); diff --git a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java index 43e3b2ef01b67..f7716c6f146ff 100644 --- a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java @@ -20,6 +20,10 @@ package org.elasticsearch.discovery; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.bulk.BulkResponse; import 
org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -30,14 +34,17 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.monitor.jvm.HotThreads; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.disruption.BlockMasterServiceOnMaster; import org.elasticsearch.test.disruption.IntermittentLongGCDisruption; import org.elasticsearch.test.disruption.LongGCDisruption; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions; +import org.elasticsearch.test.disruption.ServiceDisruptionScheme; import org.elasticsearch.test.disruption.SingleNodeDisruption; import org.elasticsearch.test.junit.annotations.TestLogging; @@ -449,6 +456,56 @@ public void testVerifyApiBlocksDuringPartition() throws Exception { } + @TestLogging( + "_root:DEBUG," + + "org.elasticsearch.action.bulk:TRACE," + + "org.elasticsearch.action.get:TRACE," + + "org.elasticsearch.cluster.service:TRACE," + + "org.elasticsearch.discovery:TRACE," + + "org.elasticsearch.indices.cluster:TRACE," + + "org.elasticsearch.indices.recovery:TRACE," + + "org.elasticsearch.index.seqno:TRACE," + + "org.elasticsearch.index.shard:TRACE") + public void testMappingTimeout() throws Exception { + startCluster(3); + createIndex("test", Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 1) + .put("index.routing.allocation.exclude._name", internalCluster().getMasterName()) + .build()); + + // create one field + index("test", "doc", "1", "{ \"f\": 1 }"); + + ensureGreen(); + + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings( + Settings.builder().put("indices.mapping.dynamic_timeout", "1ms"))); + + ServiceDisruptionScheme disruption = new BlockMasterServiceOnMaster(random()); + setDisruptionScheme(disruption); + + disruption.startDisrupting(); + + BulkRequestBuilder bulk = client().prepareBulk(); + bulk.add(client().prepareIndex("test", "doc", "2").setSource("{ \"f\": 1 }", XContentType.JSON)); + bulk.add(client().prepareIndex("test", "doc", "3").setSource("{ \"g\": 1 }", XContentType.JSON)); + bulk.add(client().prepareIndex("test", "doc", "4").setSource("{ \"f\": 1 }", XContentType.JSON)); + BulkResponse bulkResponse = bulk.get(); + assertTrue(bulkResponse.hasFailures()); + + disruption.stopDisrupting(); + + assertBusy(() -> { + IndicesStatsResponse stats = client().admin().indices().prepareStats("test").clear().get(); + for (ShardStats shardStats : stats.getShards()) { + assertThat(shardStats.getShardRouting().toString(), + shardStats.getSeqNoStats().getGlobalCheckpoint(), equalTo(shardStats.getSeqNoStats().getLocalCheckpoint())); + } + }); + + } + void assertDiscoveryCompleted(List<String> nodes) throws InterruptedException { for (final String node : nodes) { assertTrue( diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index b75297f518325..d7dd5b3b4f648 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++
b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -1381,7 +1381,7 @@ public void testVersioningCreateExistsException() throws IOException { create = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false); indexResult = engine.index(create); - assertTrue(indexResult.hasFailure()); + assertThat(indexResult.getResultType(), equalTo(Engine.Result.Type.FAILURE)); assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class)); } @@ -1519,13 +1519,13 @@ private int assertOpsOnPrimary(List<Engine.Operation> ops, long currentOpVersion Engine.IndexResult result = engine.index(indexWithVersion.apply(conflictingVersion, index)); assertThat(result.isCreated(), equalTo(false)); assertThat(result.getVersion(), equalTo(lastOpVersion)); - assertThat(result.hasFailure(), equalTo(true)); + assertThat(result.getResultType(), equalTo(Engine.Result.Type.FAILURE)); assertThat(result.getFailure(), instanceOf(VersionConflictEngineException.class)); } else { Engine.IndexResult result = engine.index(versionedOp ? indexWithVersion.apply(correctVersion, index) : index); assertThat(result.isCreated(), equalTo(docDeleted)); assertThat(result.getVersion(), equalTo(Math.max(lastOpVersion + 1, 1))); - assertThat(result.hasFailure(), equalTo(false)); + assertThat(result.getResultType(), equalTo(Engine.Result.Type.SUCCESS)); assertThat(result.getFailure(), nullValue()); lastFieldValue = index.docs().get(0).get("value"); docDeleted = false; @@ -1539,13 +1539,13 @@ private int assertOpsOnPrimary(List<Engine.Operation> ops, long currentOpVersion Engine.DeleteResult result = engine.delete(delWithVersion.apply(conflictingVersion, delete)); assertThat(result.isFound(), equalTo(docDeleted == false)); assertThat(result.getVersion(), equalTo(lastOpVersion)); - assertThat(result.hasFailure(), equalTo(true)); + assertThat(result.getResultType(), equalTo(Engine.Result.Type.FAILURE)); assertThat(result.getFailure(), instanceOf(VersionConflictEngineException.class)); } else { Engine.DeleteResult result = engine.delete(versionedOp ?
delWithVersion.apply(correctVersion, delete) : delete); assertThat(result.isFound(), equalTo(docDeleted == false)); assertThat(result.getVersion(), equalTo(Math.max(lastOpVersion + 1, 1))); - assertThat(result.hasFailure(), equalTo(false)); + assertThat(result.getResultType(), equalTo(Engine.Result.Type.SUCCESS)); assertThat(result.getFailure(), nullValue()); docDeleted = true; lastOpVersion = result.getVersion(); @@ -1623,14 +1623,14 @@ public void testNonInternalVersioningOnPrimary() throws IOException { assertThat(result.getSeqNo(), equalTo(seqNo)); assertThat(result.isCreated(), equalTo(docDeleted)); assertThat(result.getVersion(), equalTo(op.version())); - assertThat(result.hasFailure(), equalTo(false)); + assertThat(result.getResultType(), equalTo(Engine.Result.Type.SUCCESS)); assertThat(result.getFailure(), nullValue()); docDeleted = false; highestOpVersion = op.version(); } else { assertThat(result.isCreated(), equalTo(false)); assertThat(result.getVersion(), equalTo(highestOpVersion)); - assertThat(result.hasFailure(), equalTo(true)); + assertThat(result.getResultType(), equalTo(Engine.Result.Type.FAILURE)); assertThat(result.getFailure(), instanceOf(VersionConflictEngineException.class)); } } else { @@ -1641,14 +1641,14 @@ public void testNonInternalVersioningOnPrimary() throws IOException { assertThat(result.getSeqNo(), equalTo(seqNo)); assertThat(result.isFound(), equalTo(docDeleted == false)); assertThat(result.getVersion(), equalTo(op.version())); - assertThat(result.hasFailure(), equalTo(false)); + assertThat(result.getResultType(), equalTo(Engine.Result.Type.SUCCESS)); assertThat(result.getFailure(), nullValue()); docDeleted = true; highestOpVersion = op.version(); } else { assertThat(result.isFound(), equalTo(docDeleted == false)); assertThat(result.getVersion(), equalTo(highestOpVersion)); - assertThat(result.hasFailure(), equalTo(true)); + assertThat(result.getResultType(), equalTo(Engine.Result.Type.FAILURE)); assertThat(result.getFailure(), instanceOf(VersionConflictEngineException.class)); } } @@ -1761,7 +1761,7 @@ class OpAndVersion { get.version(), VersionType.INTERNAL, PRIMARY, System.currentTimeMillis(), -1, false); Engine.IndexResult indexResult = engine.index(index); - if (indexResult.hasFailure() == false) { + if (indexResult.getResultType() == Engine.Result.Type.SUCCESS) { history.add(new OpAndVersion(indexResult.getVersion(), removed, added)); } @@ -1903,7 +1903,7 @@ public void testSeqNoAndCheckpoints() throws IOException { "test", id, newUid(id), SequenceNumbers.UNASSIGNED_SEQ_NO, 0, rarely() ? 100 : Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, 0); final Engine.DeleteResult result = initialEngine.delete(delete); - if (!result.hasFailure()) { + if (result.getResultType() == Engine.Result.Type.SUCCESS) { assertThat(result.getSeqNo(), equalTo(primarySeqNo + 1)); assertThat(initialEngine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(primarySeqNo + 1)); indexedIds.remove(id); @@ -1921,7 +1921,7 @@ public void testSeqNoAndCheckpoints() throws IOException { rarely() ? 
100 : Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, 0, -1, false); final Engine.IndexResult result = initialEngine.index(index); - if (!result.hasFailure()) { + if (result.getResultType() == Engine.Result.Type.SUCCESS) { assertThat(result.getSeqNo(), equalTo(primarySeqNo + 1)); assertThat(initialEngine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(primarySeqNo + 1)); indexedIds.add(id); @@ -2195,7 +2195,7 @@ public void testEnableGcDeletes() throws Exception { // Try to index uid=1 with a too-old version, should fail: Engine.Index index = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false); Engine.IndexResult indexResult = engine.index(index); - assertTrue(indexResult.hasFailure()); + assertThat(indexResult.getResultType(), equalTo(Engine.Result.Type.FAILURE)); assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class)); // Get should still not find the document @@ -2205,7 +2205,7 @@ public void testEnableGcDeletes() throws Exception { // Try to index uid=2 with a too-old version, should fail: Engine.Index index1 = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false); indexResult = engine.index(index1); - assertTrue(indexResult.hasFailure()); + assertThat(indexResult.getResultType(), equalTo(Engine.Result.Type.FAILURE)); assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class)); // Get should not find the document @@ -3084,7 +3084,7 @@ public void testRetryWithAutogeneratedIdWorksAndNoDuplicateDocs() throws IOExcep index = new Engine.Index(newUid(doc), doc, indexResult.getSeqNo(), index.primaryTerm(), indexResult.getVersion(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); indexResult = replicaEngine.index(index); - assertThat(indexResult.hasFailure(), equalTo(false)); + assertThat(indexResult.getResultType(), equalTo(Engine.Result.Type.SUCCESS)); replicaEngine.refresh("test"); try (Engine.Searcher searcher = replicaEngine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java index 256e1f57d3f8d..0fccd625b764c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java @@ -236,7 +236,7 @@ public void testDefaultPositionIncrementGap() throws IOException { IndexShard shard = indexService.getShard(0); shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, - sourceToParse, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, update -> {}); + sourceToParse, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false); shard.refresh("test"); try (Engine.Searcher searcher = shard.acquireSearcher("test")) { LeafReader leaf = searcher.getDirectoryReader().leaves().get(0).reader(); @@ -278,7 +278,7 @@ public void testPositionIncrementGap() throws IOException { IndexShard shard = indexService.getShard(0); shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, - sourceToParse, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, update -> {}); + sourceToParse, 
IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false); shard.refresh("test"); try (Engine.Searcher searcher = shard.acquireSearcher("test")) { LeafReader leaf = searcher.getDirectoryReader().leaves().get(0).reader(); diff --git a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java index 2d2aaac7bbd26..736dc40e6867d 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java @@ -251,7 +251,7 @@ protected EngineFactory getEngineFactory(ShardRouting routing) { // test only primary shards.startPrimary(); BulkItemResponse response = shards.index( - new IndexRequest(index.getName(), "testDocumentFailureReplication", "1") + new IndexRequest(index.getName(), "type", "1") .source("{}", XContentType.JSON) ); assertTrue(response.isFailed()); @@ -265,7 +265,7 @@ protected EngineFactory getEngineFactory(ShardRouting routing) { } shards.startReplicas(nReplica); response = shards.index( - new IndexRequest(index.getName(), "testDocumentFailureReplication", "1") + new IndexRequest(index.getName(), "type", "1") .source("{}", XContentType.JSON) ); assertTrue(response.isFailed()); @@ -281,7 +281,7 @@ public void testRequestFailureReplication() throws Exception { try (ReplicationGroup shards = createGroup(0)) { shards.startAll(); BulkItemResponse response = shards.index( - new IndexRequest(index.getName(), "testRequestFailureException", "1") + new IndexRequest(index.getName(), "type", "1") .source("{}", XContentType.JSON) .version(2) ); @@ -300,7 +300,7 @@ public void testRequestFailureReplication() throws Exception { } shards.startReplicas(nReplica); response = shards.index( - new IndexRequest(index.getName(), "testRequestFailureException", "1") + new IndexRequest(index.getName(), "type", "1") .source("{}", XContentType.JSON) .version(2) ); diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index 38ceb7b7a215b..21be1da3845b6 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -22,7 +22,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexableField; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.bulk.BulkShardRequest; import org.elasticsearch.action.index.IndexRequest; @@ -33,6 +32,7 @@ import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; @@ -184,8 +184,7 @@ public void testRecoveryToReplicaThatReceivedExtraDocument() throws Exception { VersionType.EXTERNAL, randomNonNegativeLong(), false, - SourceToParse.source("index", "type", "replica", new BytesArray("{}"), XContentType.JSON), - mapping -> {}); + SourceToParse.source("index", "type", "replica", new BytesArray("{}"), XContentType.JSON)); 
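// Editor's note (an illustration, not part of the patch): the deleted trailing
// argument, `mapping -> {}`, was an inline mapping-update callback. Its removal
// across these call sites suggests the new contract is that mappings must
// already be in place before a replica applies an operation, so the call shape
// becomes (taken from the hunks above; the argument names here are assumed):
//
//     shard.applyIndexOperationOnReplica(seqNo, version, VersionType.EXTERNAL,
//             IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, isRetry, sourceToParse);
//
// and an operation whose document would require a mapping change is expected
// to fail rather than have the replica update the mapping itself.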
shards.promoteReplicaToPrimary(promotedReplica).get(); oldPrimary.close("demoted", randomBoolean()); oldPrimary.store().close(); @@ -200,9 +199,7 @@ public void testRecoveryToReplicaThatReceivedExtraDocument() throws Exception { VersionType.INTERNAL, SourceToParse.source("index", "type", "primary", new BytesArray("{}"), XContentType.JSON), randomNonNegativeLong(), - false, - mapping -> { - }); + false); } final IndexShard recoveredReplica = shards.addReplicaWithExistingPath(remainingReplica.shardPath(), remainingReplica.routingEntry().currentNodeId()); diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index e2e79368b2355..e8f396800458b 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -94,6 +94,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.index.shard.IndexShardTestCase.getTranslog; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; @@ -101,7 +102,6 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -import static org.elasticsearch.index.shard.IndexShardTestCase.getTranslog; public class IndexShardIT extends ESSingleNodeTestCase { @@ -336,7 +336,7 @@ public void testMaybeFlush() throws Exception { assertFalse(shard.shouldPeriodicallyFlush()); shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, SourceToParse.source("test", "test", "1", new BytesArray("{}"), XContentType.JSON), - IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, update -> {}); + IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false); assertTrue(shard.shouldPeriodicallyFlush()); final Translog translog = getTranslog(shard); assertEquals(2, translog.stats().getUncommittedOperations()); @@ -373,7 +373,7 @@ public void testMaybeRollTranslogGeneration() throws Exception { .put("index.number_of_shards", 1) .put("index.translog.generation_threshold_size", generationThreshold + "b") .build(); - createIndex("test", settings); + createIndex("test", settings, "test"); ensureGreen("test"); final IndicesService indicesService = getInstanceFromNode(IndicesService.class); final IndexService test = indicesService.indexService(resolveIndex("test")); @@ -386,7 +386,7 @@ public void testMaybeRollTranslogGeneration() throws Exception { assertThat(translog.currentFileGeneration(), equalTo(generation + rolls)); final Engine.IndexResult result = shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, SourceToParse.source("test", "test", "1", new BytesArray("{}"), XContentType.JSON), - IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, update -> {}); + IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false); final Translog.Location location = result.getTranslogLocation(); shard.afterWriteOperation(); if (location.translogLocation + location.size > generationThreshold) { diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java 
b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 1bdc54b982e7c..b0298ee88716c 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -83,7 +83,6 @@ import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.seqno.SequenceNumbers; @@ -1063,7 +1062,7 @@ public void onFailure(Exception e) { */ public void testSnapshotStore() throws IOException { final IndexShard shard = newStartedShard(true); - indexDoc(shard, "test", "0"); + indexDoc(shard, "_doc", "0"); flushShard(shard); final IndexShard newShard = reinitShard(shard); @@ -1139,9 +1138,9 @@ public void testMinimumCompatVersion() throws IOException { IndexShard test = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); recoverShardFromStore(test); - indexDoc(test, "test", "test"); + indexDoc(test, "_doc", "test"); assertEquals(versionCreated.luceneVersion, test.minimumCompatibleVersion()); - indexDoc(test, "test", "test"); + indexDoc(test, "_doc", "test"); assertEquals(versionCreated.luceneVersion, test.minimumCompatibleVersion()); test.getEngine().flush(); assertEquals(Version.CURRENT.luceneVersion, test.minimumCompatibleVersion()); @@ -1188,19 +1187,19 @@ public void testRefreshMetric() throws IOException { long initialTotalTime = shard.refreshStats().getTotalTimeInMillis(); // check time advances for (int i = 1; shard.refreshStats().getTotalTimeInMillis() == initialTotalTime; i++) { - indexDoc(shard, "test", "test"); + indexDoc(shard, "_doc", "test"); assertThat(shard.refreshStats().getTotal(), equalTo(2L + i - 1)); shard.refresh("test"); assertThat(shard.refreshStats().getTotal(), equalTo(2L + i)); assertThat(shard.refreshStats().getTotalTimeInMillis(), greaterThanOrEqualTo(initialTotalTime)); } long refreshCount = shard.refreshStats().getTotal(); - indexDoc(shard, "test", "test"); + indexDoc(shard, "_doc", "test"); try (Engine.GetResult ignored = shard.get(new Engine.Get(true, false, "test", "test", new Term(IdFieldMapper.NAME, Uid.encodeId("test"))))) { assertThat(shard.refreshStats().getTotal(), equalTo(refreshCount+1)); } - indexDoc(shard, "test", "test"); + indexDoc(shard, "_doc", "test"); shard.writeIndexingBuffer(); assertThat(shard.refreshStats().getTotal(), equalTo(refreshCount+2)); closeShards(shard); @@ -1208,7 +1207,7 @@ public void testRefreshMetric() throws IOException { public void testIndexingOperationsListeners() throws IOException { IndexShard shard = newStartedShard(true); - indexDoc(shard, "test", "0", "{\"foo\" : \"bar\"}"); + indexDoc(shard, "_doc", "0", "{\"foo\" : \"bar\"}"); shard.updateLocalCheckpointForShard(shard.shardRouting.allocationId().getId(), 0); AtomicInteger preIndex = new AtomicInteger(); AtomicInteger postIndexCreate = new AtomicInteger(); @@ -1227,14 +1226,19 @@ public Engine.Index preIndex(ShardId shardId, Engine.Index operation) { @Override public void postIndex(ShardId shardId, Engine.Index index, Engine.IndexResult result) { - if (result.hasFailure() == false) { - if (result.isCreated()) { - postIndexCreate.incrementAndGet(); - } else { - postIndexUpdate.incrementAndGet(); - } - } else { - postIndex(shardId, index, result.getFailure()); + switch 
(result.getResultType()) { + case SUCCESS: + if (result.isCreated()) { + postIndexCreate.incrementAndGet(); + } else { + postIndexUpdate.incrementAndGet(); + } + break; + case FAILURE: + postIndex(shardId, index, result.getFailure()); + break; + default: + fail("unexpected result type:" + result.getResultType()); } } @@ -1251,10 +1255,15 @@ public Engine.Delete preDelete(ShardId shardId, Engine.Delete delete) { @Override public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResult result) { - if (result.hasFailure() == false) { - postDelete.incrementAndGet(); - } else { - postDelete(shardId, delete, result.getFailure()); + switch (result.getResultType()) { + case SUCCESS: + postDelete.incrementAndGet(); + break; + case FAILURE: + postDelete(shardId, delete, result.getFailure()); + break; + default: + fail("unexpected result type:" + result.getResultType()); } } @@ -1266,7 +1275,7 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Exception ex) { }); recoverShardFromStore(shard); - indexDoc(shard, "test", "1"); + indexDoc(shard, "_doc", "1"); assertEquals(1, preIndex.get()); assertEquals(1, postIndexCreate.get()); assertEquals(0, postIndexUpdate.get()); @@ -1275,7 +1284,7 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Exception ex) { assertEquals(0, postDelete.get()); assertEquals(0, postDeleteException.get()); - indexDoc(shard, "test", "1"); + indexDoc(shard, "_doc", "1"); assertEquals(2, preIndex.get()); assertEquals(1, postIndexCreate.get()); assertEquals(1, postIndexUpdate.get()); @@ -1284,7 +1293,7 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Exception ex) { assertEquals(0, postDelete.get()); assertEquals(0, postDeleteException.get()); - deleteDoc(shard, "test", "1"); + deleteDoc(shard, "_doc", "1"); assertEquals(2, preIndex.get()); assertEquals(1, postIndexCreate.get()); @@ -1298,7 +1307,7 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Exception ex) { shard.state = IndexShardState.STARTED; // It will generate exception try { - indexDoc(shard, "test", "1"); + indexDoc(shard, "_doc", "1"); fail(); } catch (AlreadyClosedException e) { @@ -1312,7 +1321,7 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Exception ex) { assertEquals(1, postDelete.get()); assertEquals(0, postDeleteException.get()); try { - deleteDoc(shard, "test", "1"); + deleteDoc(shard, "_doc", "1"); fail(); } catch (AlreadyClosedException e) { @@ -1458,7 +1467,7 @@ public void testRelocatedShardCanNotBeRevived() throws IOException, InterruptedE closeShards(shard); } - public void testShardCanNotBeMarkedAsRelocatedIfRelocationCancelled() throws IOException, InterruptedException { + public void testShardCanNotBeMarkedAsRelocatedIfRelocationCancelled() throws IOException { final IndexShard shard = newStartedShard(true); final ShardRouting originalRouting = shard.routingEntry(); IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(originalRouting, "other_node")); @@ -1531,19 +1540,18 @@ public void testRecoverFromStoreWithOutOfOrderDelete() throws IOException { * - If flush and then recover from the existing store, delete #1 will be removed while index #0 is still retained and replayed. 
*/ final IndexShard shard = newStartedShard(false); - final Consumer<Mapping> mappingConsumer = getMappingUpdater(shard, "test"); - shard.applyDeleteOperationOnReplica(1, 2, "test", "id", VersionType.EXTERNAL, mappingConsumer); + shard.applyDeleteOperationOnReplica(1, 2, "_doc", "id", VersionType.EXTERNAL); shard.getEngine().rollTranslogGeneration(); // isolate the delete in its own generation shard.applyIndexOperationOnReplica(0, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, - SourceToParse.source(shard.shardId().getIndexName(), "test", "id", new BytesArray("{}"), XContentType.JSON), mappingConsumer); + SourceToParse.source(shard.shardId().getIndexName(), "_doc", "id", new BytesArray("{}"), XContentType.JSON)); shard.applyIndexOperationOnReplica(3, 3, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, - SourceToParse.source(shard.shardId().getIndexName(), "test", "id-3", new BytesArray("{}"), XContentType.JSON), mappingConsumer); + SourceToParse.source(shard.shardId().getIndexName(), "_doc", "id-3", new BytesArray("{}"), XContentType.JSON)); // Flushing a new commit with local checkpoint=1 allows to skip the translog gen #1 in recovery. shard.flush(new FlushRequest().force(true).waitIfOngoing(true)); shard.applyIndexOperationOnReplica(2, 3, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, - SourceToParse.source(shard.shardId().getIndexName(), "test", "id-2", new BytesArray("{}"), XContentType.JSON), mappingConsumer); + SourceToParse.source(shard.shardId().getIndexName(), "_doc", "id-2", new BytesArray("{}"), XContentType.JSON)); shard.applyIndexOperationOnReplica(5, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, - SourceToParse.source(shard.shardId().getIndexName(), "test", "id-5", new BytesArray("{}"), XContentType.JSON), mappingConsumer); + SourceToParse.source(shard.shardId().getIndexName(), "_doc", "id-5", new BytesArray("{}"), XContentType.JSON)); final int translogOps; if (randomBoolean()) { @@ -1580,7 +1588,7 @@ public void testRecoverFromStore() throws IOException { int totalOps = randomInt(10); int translogOps = totalOps; for (int i = 0; i < totalOps; i++) { - indexDoc(shard, "test", Integer.toString(i)); + indexDoc(shard, "_doc", Integer.toString(i)); } if (randomBoolean()) { shard.updateLocalCheckpointForShard(shard.shardRouting.allocationId().getId(), totalOps - 1); @@ -1608,7 +1616,7 @@ public void testPrimaryHandOffUpdatesLocalCheckpoint() throws IOException { final IndexShard primarySource = newStartedShard(true); int totalOps = randomInt(10); for (int i = 0; i < totalOps; i++) { - indexDoc(primarySource, "test", Integer.toString(i)); + indexDoc(primarySource, "_doc", Integer.toString(i)); } IndexShardTestCase.updateRoutingEntry(primarySource, primarySource.routingEntry().relocate(randomAlphaOfLength(10), -1)); final IndexShard primaryTarget = newShard(primarySource.routingEntry().getTargetRelocatingShard()); @@ -1626,15 +1634,15 @@ public void testPrimaryHandOffUpdatesLocalCheckpoint() throws IOException { /* This test just verifies that we fill up local checkpoint up to max seen seqID on primary recovery */ public void testRecoverFromStoreWithNoOps() throws IOException { final IndexShard shard = newStartedShard(true); - indexDoc(shard, "test", "0"); - Engine.IndexResult test = indexDoc(shard, "test", "1"); + indexDoc(shard, "_doc", "0"); + Engine.IndexResult test = indexDoc(shard, "_doc", "1"); // start a replica shard and index the second doc final IndexShard otherShard =
newStartedShard(false); updateMappings(otherShard, shard.indexSettings().getIndexMetaData()); - SourceToParse sourceToParse = SourceToParse.source(shard.shardId().getIndexName(), "test", "1", + SourceToParse sourceToParse = SourceToParse.source(shard.shardId().getIndexName(), "_doc", "1", new BytesArray("{}"), XContentType.JSON); otherShard.applyIndexOperationOnReplica(1, 1, - VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse, update -> {}); + VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse); final ShardRouting primaryShardRouting = shard.routingEntry(); IndexShard newShard = reinitShard(otherShard, ShardRoutingHelper.initWithSameId(primaryShardRouting, @@ -1676,7 +1684,7 @@ public void testRecoverFromStoreWithNoOps() throws IOException { public void testRecoverFromCleanStore() throws IOException { final IndexShard shard = newStartedShard(true); - indexDoc(shard, "test", "0"); + indexDoc(shard, "_doc", "0"); if (randomBoolean()) { flushShard(shard); } @@ -1699,7 +1707,7 @@ public void testRecoverFromCleanStore() throws IOException { public void testFailIfIndexNotPresentInRecoverFromStore() throws Exception { final IndexShard shard = newStartedShard(true); - indexDoc(shard, "test", "0"); + indexDoc(shard, "_doc", "0"); if (randomBoolean()) { flushShard(shard); } @@ -1739,7 +1747,7 @@ public void testFailIfIndexNotPresentInRecoverFromStore() throws Exception { assertDocCount(newShard, 0); // we can't issue this request through a client because of the inconsistencies we created with the cluster state // doing it directly instead - indexDoc(newShard, "test", "0"); + indexDoc(newShard, "_doc", "0"); newShard.refresh("test"); assertDocCount(newShard, 1); @@ -1749,21 +1757,20 @@ public void testFailIfIndexNotPresentInRecoverFromStore() throws Exception { public void testRecoverFromStoreRemoveStaleOperations() throws Exception { final IndexShard shard = newStartedShard(false); final String indexName = shard.shardId().getIndexName(); - final Consumer<Mapping> mapping = getMappingUpdater(shard, "doc"); // Index #0, index #1 shard.applyIndexOperationOnReplica(0, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, - SourceToParse.source(indexName, "doc", "doc-0", new BytesArray("{}"), XContentType.JSON), mapping); + SourceToParse.source(indexName, "_doc", "doc-0", new BytesArray("{}"), XContentType.JSON)); flushShard(shard); shard.updateGlobalCheckpointOnReplica(0, "test"); // stick the global checkpoint here.
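// Editor's note (an illustration, not part of the patch): parking the global
// checkpoint at 0 while operations with higher sequence numbers are indexed is
// the point of this test. Operations above the global checkpoint are not
// guaranteed to survive a store-based recovery, which is what allows the later
// assertion that the stale doc at seq# 1 is discarded while doc-0, at the
// checkpoint, is retained.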
shard.applyIndexOperationOnReplica(1, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, - SourceToParse.source(indexName, "doc", "doc-1", new BytesArray("{}"), XContentType.JSON), mapping); + SourceToParse.source(indexName, "_doc", "doc-1", new BytesArray("{}"), XContentType.JSON)); flushShard(shard); assertThat(getShardDocUIDs(shard), containsInAnyOrder("doc-0", "doc-1")); // Simulate resync (without rollback): Noop #1, index #2 acquireReplicaOperationPermitBlockingly(shard, shard.primaryTerm + 1); shard.markSeqNoAsNoop(1, "test"); shard.applyIndexOperationOnReplica(2, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, - SourceToParse.source(indexName, "doc", "doc-2", new BytesArray("{}"), XContentType.JSON), mapping); + SourceToParse.source(indexName, "_doc", "doc-2", new BytesArray("{}"), XContentType.JSON)); flushShard(shard); assertThat(getShardDocUIDs(shard), containsInAnyOrder("doc-0", "doc-1", "doc-2")); // Recovering from store should discard doc #1 @@ -1800,11 +1807,11 @@ public void testRestoreShard() throws IOException { final IndexShard source = newStartedShard(true); IndexShard target = newStartedShard(true); - indexDoc(source, "test", "0"); + indexDoc(source, "_doc", "0"); if (randomBoolean()) { source.refresh("test"); } - indexDoc(target, "test", "1"); + indexDoc(target, "_doc", "1"); target.refresh("test"); assertDocs(target, "1"); flushShard(source); // only flush source @@ -1850,8 +1857,8 @@ public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version versio public void testSearcherWrapperIsUsed() throws IOException { IndexShard shard = newStartedShard(true); - indexDoc(shard, "test", "0", "{\"foo\" : \"bar\"}"); - indexDoc(shard, "test", "1", "{\"foobar\" : \"bar\"}"); + indexDoc(shard, "_doc", "0", "{\"foo\" : \"bar\"}"); + indexDoc(shard, "_doc", "1", "{\"foobar\" : \"bar\"}"); shard.refresh("test"); Engine.GetResult getResult = shard.get(new Engine.Get(false, false, "test", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1")))); @@ -1915,14 +1922,14 @@ public IndexSearcher wrap(IndexSearcher searcher) throws EngineException { .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .build(); IndexMetaData metaData = IndexMetaData.builder("test") - .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\", \"fielddata\": true }}}") + .putMapping("_doc", "{ \"properties\": { \"foo\": { \"type\": \"text\", \"fielddata\": true }}}") .settings(settings) .primaryTerm(0, 1).build(); IndexShard shard = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, wrapper); recoverShardFromStore(shard); - indexDoc(shard, "test", "0", "{\"foo\" : \"bar\"}"); + indexDoc(shard, "_doc", "0", "{\"foo\" : \"bar\"}"); shard.refresh("created segment 1"); - indexDoc(shard, "test", "1", "{\"foobar\" : \"bar\"}"); + indexDoc(shard, "_doc", "1", "{\"foobar\" : \"bar\"}"); shard.refresh("created segment 2"); // test global ordinals are evicted @@ -1955,9 +1962,9 @@ public IndexSearcher wrap(IndexSearcher searcher) throws EngineException { public void testIndexingOperationListenersIsInvokedOnRecovery() throws IOException { IndexShard shard = newStartedShard(true); - indexDoc(shard, "test", "0", "{\"foo\" : \"bar\"}"); - deleteDoc(shard, "test", "0"); - indexDoc(shard, "test", "1", "{\"foo\" : \"bar\"}"); + indexDoc(shard, "_doc", "0", "{\"foo\" : \"bar\"}"); + deleteDoc(shard, "_doc", "0"); + indexDoc(shard, "_doc", "1", "{\"foo\" : \"bar\"}"); shard.refresh("test"); final AtomicInteger preIndex = new 
AtomicInteger(); @@ -2007,7 +2014,7 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResul public void testSearchIsReleaseIfWrapperFails() throws IOException { IndexShard shard = newStartedShard(true); - indexDoc(shard, "test", "0", "{\"foo\" : \"bar\"}"); + indexDoc(shard, "_doc", "0", "{\"foo\" : \"bar\"}"); shard.refresh("test"); IndexSearcherWrapper wrapper = new IndexSearcherWrapper() { @Override @@ -2043,13 +2050,13 @@ public void testTranslogRecoverySyncsTranslog() throws IOException { .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .build(); IndexMetaData metaData = IndexMetaData.builder("test") - .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") + .putMapping("_doc", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") .settings(settings) .primaryTerm(0, 1).build(); IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); recoverShardFromStore(primary); - indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}"); + indexDoc(primary, "_doc", "0", "{\"foo\" : \"bar\"}"); IndexShard replica = newShard(primary.shardId(), false, "n2", metaData, null); recoverReplica(replica, primary, (shard, discoveryNode) -> new RecoveryTarget(shard, discoveryNode, recoveryListener, aLong -> { @@ -2071,7 +2078,7 @@ public void testRecoverFromTranslog() throws IOException { .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .build(); IndexMetaData metaData = IndexMetaData.builder("test") - .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") + .putMapping("_doc", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") .settings(settings) .primaryTerm(0, randomLongBetween(1, Long.MAX_VALUE)).build(); IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); @@ -2080,11 +2087,11 @@ public void testRecoverFromTranslog() throws IOException { int numCorruptEntries = 0; for (int i = 0; i < numTotalEntries; i++) { if (randomBoolean()) { - operations.add(new Translog.Index("test", "1", 0, primary.getPrimaryTerm(), 1, VersionType.INTERNAL, + operations.add(new Translog.Index("_doc", "1", 0, primary.getPrimaryTerm(), 1, VersionType.INTERNAL, "{\"foo\" : \"bar\"}".getBytes(Charset.forName("UTF-8")), null, null, -1)); } else { // corrupt entry - operations.add(new Translog.Index("test", "2", 1, primary.getPrimaryTerm(), 1, VersionType.INTERNAL, + operations.add(new Translog.Index("_doc", "2", 1, primary.getPrimaryTerm(), 1, VersionType.INTERNAL, "{\"foo\" : \"bar}".getBytes(Charset.forName("UTF-8")), null, null, -1)); numCorruptEntries++; } @@ -2124,7 +2131,7 @@ public Translog.Operation next() throws IOException { public void testShardActiveDuringInternalRecovery() throws IOException { IndexShard shard = newStartedShard(true); - indexDoc(shard, "type", "0"); + indexDoc(shard, "_doc", "0"); shard = reinitShard(shard); DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); shard.markAsRecovering("for testing", new RecoveryState(shard.routingEntry(), localNode, null)); @@ -2145,13 +2152,13 @@ public void testShardActiveDuringPeerRecovery() throws IOException { .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .build(); IndexMetaData metaData = IndexMetaData.builder("test") - .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") + .putMapping("_doc", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") .settings(settings) .primaryTerm(0, 1).build(); IndexShard primary = 
newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); recoverShardFromStore(primary); - indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}"); + indexDoc(primary, "_doc", "0", "{\"foo\" : \"bar\"}"); IndexShard replica = newShard(primary.shardId(), false, "n2", metaData, null); DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); replica.markAsRecovering("for testing", new RecoveryState(replica.routingEntry(), localNode, localNode)); @@ -2178,13 +2185,13 @@ public void testRefreshListenersDuringPeerRecovery() throws IOException { .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .build(); IndexMetaData metaData = IndexMetaData.builder("test") - .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") + .putMapping("_doc", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") .settings(settings) .primaryTerm(0, 1).build(); IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); recoverShardFromStore(primary); - indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}"); + indexDoc(primary, "_doc", "0", "{\"foo\" : \"bar\"}"); Consumer<IndexShard> assertListenerCalled = shard -> { AtomicBoolean called = new AtomicBoolean(); shard.addRefreshListener(null, b -> { @@ -2230,15 +2237,15 @@ public void testRecoverFromLocalShard() throws IOException { .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .build(); IndexMetaData metaData = IndexMetaData.builder("source") - .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") + .putMapping("_doc", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") .settings(settings) .primaryTerm(0, 1).build(); IndexShard sourceShard = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); recoverShardFromStore(sourceShard); - indexDoc(sourceShard, "test", "0", "{\"foo\" : \"bar\"}"); - indexDoc(sourceShard, "test", "1", "{\"foo\" : \"bar\"}"); + indexDoc(sourceShard, "_doc", "0", "{\"foo\" : \"bar\"}"); + indexDoc(sourceShard, "_doc", "1", "{\"foo\" : \"bar\"}"); sourceShard.refresh("test"); @@ -2290,8 +2297,8 @@ public void testRecoverFromLocalShard() throws IOException { closeShards(newShard); } - assertThat(requestedMappingUpdates, hasKey("test")); - assertThat(requestedMappingUpdates.get("test").get().source().string(), equalTo("{\"properties\":{\"foo\":{\"type\":\"text\"}}}")); + assertThat(requestedMappingUpdates, hasKey("_doc")); + assertThat(requestedMappingUpdates.get("_doc").get().source().string(), equalTo("{\"properties\":{\"foo\":{\"type\":\"text\"}}}")); closeShards(sourceShard, targetShard); } @@ -2306,7 +2313,7 @@ public void testDocStats() throws IOException { final long numDocsToDelete = randomIntBetween((int) Math.ceil(Math.nextUp(numDocs / 10.0)), Math.toIntExact(numDocs)); for (int i = 0; i < numDocs; i++) { final String id = Integer.toString(i); - indexDoc(indexShard, "test", id); + indexDoc(indexShard, "_doc", id); } if (randomBoolean()) { indexShard.refresh("test"); } @@ -2328,8 +2335,8 @@ IntStream.range(0, Math.toIntExact(numDocs)).boxed().collect(Collectors.toList())); for (final Integer i : ids) { final String id = Integer.toString(i); - deleteDoc(indexShard, "test", id); - indexDoc(indexShard, "test", id); + deleteDoc(indexShard, "_doc", id); + indexDoc(indexShard, "_doc", id); } // flush the buffered deletes @@ -2386,7 +2393,7 @@ public void testEstimateTotalDocSize() throws Exception { .field("point", randomFloat())
.field("description", randomUnicodeOfCodepointLength(100)) .endObject()); - indexDoc(indexShard, "doc", Integer.toString(i), doc); + indexDoc(indexShard, "_doc", Integer.toString(i), doc); } assertThat("Without flushing, segment sizes should be zero", @@ -2413,7 +2420,7 @@ public void testEstimateTotalDocSize() throws Exception { if (randomBoolean()) { deleteDoc(indexShard, "doc", Integer.toString(i)); } else { - indexDoc(indexShard, "doc", Integer.toString(i), "{\"foo\": \"bar\"}"); + indexDoc(indexShard, "_doc", Integer.toString(i), "{\"foo\": \"bar\"}"); } } if (randomBoolean()) { @@ -2442,11 +2449,11 @@ public void testEstimateTotalDocSize() throws Exception { */ public void testReadSnapshotConcurrently() throws IOException, InterruptedException { IndexShard indexShard = newStartedShard(); - indexDoc(indexShard, "doc", "0", "{\"foo\" : \"bar\"}"); + indexDoc(indexShard, "_doc", "0", "{}"); if (randomBoolean()) { indexShard.refresh("test"); } - indexDoc(indexShard, "doc", "1", "{\"foo\" : \"bar\"}"); + indexDoc(indexShard, "_doc", "1", "{}"); indexShard.flush(new FlushRequest()); closeShards(indexShard); @@ -2490,7 +2497,7 @@ public void testReadSnapshotAndCheckIndexConcurrently() throws Exception { IndexShard indexShard = newStartedShard(isPrimary); final long numDocs = between(10, 100); for (long i = 0; i < numDocs; i++) { - indexDoc(indexShard, "doc", Long.toString(i), "{\"foo\" : \"bar\"}"); + indexDoc(indexShard, "_doc", Long.toString(i), "{}"); if (randomBoolean()) { indexShard.refresh("test"); } @@ -2577,11 +2584,10 @@ private Result indexOnReplicaWithGaps( for (int i = offset + 1; i < operations; i++) { if (!rarely() || i == operations - 1) { // last operation can't be a gap as it's not a gap anymore final String id = Integer.toString(i); - SourceToParse sourceToParse = SourceToParse.source(indexShard.shardId().getIndexName(), "test", id, + SourceToParse sourceToParse = SourceToParse.source(indexShard.shardId().getIndexName(), "_doc", id, new BytesArray("{}"), XContentType.JSON); indexShard.applyIndexOperationOnReplica(i, - 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse, - getMappingUpdater(indexShard, sourceToParse.type())); + 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse); if (!gap && i == localCheckpoint + 1) { localCheckpoint++; } @@ -2697,12 +2703,12 @@ public void testSegmentMemoryTrackedInBreaker() throws Exception { .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .build(); IndexMetaData metaData = IndexMetaData.builder("test") - .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") + .putMapping("_doc", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") .settings(settings) .primaryTerm(0, 1).build(); IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); recoverShardFromStore(primary); - indexDoc(primary, "test", "0", "{\"foo\" : \"foo\"}"); + indexDoc(primary, "_doc", "0", "{\"foo\" : \"foo\"}"); primary.refresh("forced refresh"); SegmentsStats ss = primary.segmentStats(randomBoolean()); @@ -2710,9 +2716,9 @@ public void testSegmentMemoryTrackedInBreaker() throws Exception { assertThat(ss.getMemoryInBytes(), equalTo(breaker.getUsed())); final long preRefreshBytes = ss.getMemoryInBytes(); - indexDoc(primary, "test", "1", "{\"foo\" : \"bar\"}"); - indexDoc(primary, "test", "2", "{\"foo\" : \"baz\"}"); - indexDoc(primary, "test", "3", "{\"foo\" : \"eggplant\"}"); + indexDoc(primary, "_doc", "1", "{\"foo\" : \"bar\"}"); 
+ indexDoc(primary, "_doc", "2", "{\"foo\" : \"baz\"}"); + indexDoc(primary, "_doc", "3", "{\"foo\" : \"eggplant\"}"); ss = primary.segmentStats(randomBoolean()); breaker = primary.circuitBreakerService.getBreaker(CircuitBreaker.ACCOUNTING); @@ -2725,7 +2731,7 @@ public void testSegmentMemoryTrackedInBreaker() throws Exception { assertThat(breaker.getUsed(), equalTo(ss.getMemoryInBytes())); assertThat(breaker.getUsed(), greaterThan(preRefreshBytes)); - indexDoc(primary, "test", "4", "{\"foo\": \"potato\"}"); + indexDoc(primary, "_doc", "4", "{\"foo\": \"potato\"}"); // Forces a refresh with the INTERNAL scope ((InternalEngine) primary.getEngine()).writeIndexingBuffer(); @@ -2736,7 +2742,7 @@ public void testSegmentMemoryTrackedInBreaker() throws Exception { final long postRefreshBytes = ss.getMemoryInBytes(); // Deleting a doc causes its memory to be freed from the breaker - deleteDoc(primary, "test", "0"); + deleteDoc(primary, "_doc", "0"); primary.refresh("force refresh"); ss = primary.segmentStats(randomBoolean()); @@ -2755,7 +2761,7 @@ public void testSegmentMemoryTrackedWithRandomSearchers() throws Exception { .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .build(); IndexMetaData metaData = IndexMetaData.builder("test") - .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") + .putMapping("_doc", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") .settings(settings) .primaryTerm(0, 1).build(); IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); @@ -2775,13 +2781,13 @@ public void testSegmentMemoryTrackedWithRandomSearchers() throws Exception { if (randomBoolean()) { String id = "id-" + threadName + "-" + i; logger.debug("--> {} indexing {}", threadName, id); - indexDoc(primary, "test", id, "{\"foo\" : \"" + randomAlphaOfLength(10) + "\"}"); + indexDoc(primary, "_doc", id, "{\"foo\" : \"" + randomAlphaOfLength(10) + "\"}"); } if (randomBoolean() && i > 10) { String id = "id-" + threadName + "-" + randomIntBetween(0, i - 1); logger.debug("--> {}, deleting {}", threadName, id); - deleteDoc(primary, "test", id); + deleteDoc(primary, "_doc", id); } if (randomBoolean()) { @@ -2850,7 +2856,7 @@ public void testFlushOnInactive() throws Exception { .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .build(); IndexMetaData metaData = IndexMetaData.builder("test") - .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") + .putMapping("_doc", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") .settings(settings) .primaryTerm(0, 1).build(); ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(metaData.getIndex(), 0), "n1", true, ShardRoutingState @@ -2871,7 +2877,7 @@ public void onShardInactive(IndexShard indexShard) { primaryRef.set(primary); recoverShardFromStore(primary); for (int i = 0; i < 3; i++) { - indexDoc(primary, "test", "" + i, "{\"foo\" : \"" + randomAlphaOfLength(10) + "\"}"); + indexDoc(primary, "_doc", "" + i, "{\"foo\" : \"" + randomAlphaOfLength(10) + "\"}"); primary.refresh("test"); // produce segments } List segments = primary.segments(false); diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java index 1c9914fb36650..28debbb4e3432 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java @@ -57,10 
+57,15 @@ public Engine.Index preIndex(ShardId shardId, Engine.Index operation) { @Override public void postIndex(ShardId shardId, Engine.Index index, Engine.IndexResult result) { assertThat(shardId, is(randomShardId)); - if (result.hasFailure() == false) { - postIndex.incrementAndGet(); - } else { - postIndex(shardId, index, result.getFailure()); + switch (result.getResultType()) { + case SUCCESS: + postIndex.incrementAndGet(); + break; + case FAILURE: + postIndex(shardId, index, result.getFailure()); + break; + default: + throw new IllegalArgumentException("unknown result type: " + result.getResultType()); } } @@ -80,10 +85,15 @@ public Engine.Delete preDelete(ShardId shardId, Engine.Delete delete) { @Override public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResult result) { assertThat(shardId, is(randomShardId)); - if (result.hasFailure() == false) { - postDelete.incrementAndGet(); - } else { - postDelete(shardId, delete, result.getFailure()); + switch (result.getResultType()) { + case SUCCESS: + postDelete.incrementAndGet(); + break; + case FAILURE: + postDelete(shardId, delete, result.getFailure()); + break; + default: + throw new IllegalArgumentException("unknown result type: " + result.getResultType()); } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java index 12c3804a1a7b0..1257aea3d14fa 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java @@ -67,8 +67,8 @@ public void testSyncerSendsOffCorrectDocuments() throws Exception { for (int i = 0; i < numDocs; i++) { // Index doc but not advance local checkpoint. shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, - SourceToParse.source(shard.shardId().getIndexName(), "test", Integer.toString(i), new BytesArray("{}"), XContentType.JSON), - IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, getMappingUpdater(shard, "test")); + SourceToParse.source(shard.shardId().getIndexName(), "_doc", Integer.toString(i), new BytesArray("{}"), XContentType.JSON), + IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false); } long globalCheckPoint = numDocs > 0 ? randomIntBetween(0, numDocs - 1) : 0; @@ -121,8 +121,8 @@ public void testSyncerOnClosingShard() throws Exception { for (int i = 0; i < numDocs; i++) { // Index doc but not advance local checkpoint. 
shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, - SourceToParse.source(shard.shardId().getIndexName(), "test", Integer.toString(i), new BytesArray("{}"), XContentType.JSON), - IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, getMappingUpdater(shard, "test")); + SourceToParse.source(shard.shardId().getIndexName(), "_doc", Integer.toString(i), new BytesArray("{}"), XContentType.JSON), + IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false); } String allocationId = shard.routingEntry().allocationId().getId(); diff --git a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java index a2149b9d28a0b..27e1c1af2bb83 100644 --- a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java +++ b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java @@ -51,6 +51,7 @@ import java.io.IOException; import java.util.Arrays; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; @@ -117,7 +118,7 @@ public void testSyncedFlush() throws ExecutionException, InterruptedException, I ShardsSyncedFlushResult result; if (randomBoolean()) { logger.info("--> sync flushing shard 0"); - result = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), new ShardId(index, 0)); + result = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), new ShardId(index, 0)); } else { logger.info("--> sync flushing index [test]"); SyncedFlushResponse indicesResult = client().admin().indices().prepareSyncedFlush("test").get(); @@ -246,11 +247,14 @@ private void indexDoc(Engine engine, String id) throws IOException { } private String syncedFlushDescription(ShardsSyncedFlushResult result) { - return result.shardResponses().entrySet().stream() + String detail = result.shardResponses().entrySet().stream() .map(e -> "Shard [" + e.getKey() + "], result [" + e.getValue() + "]") .collect(Collectors.joining(",")); + return String.format(Locale.ROOT, "Total shards: [%d], failed: [%s], reason: [%s], detail: [%s]", + result.totalShards(), result.failed(), result.failureReason(), detail); } + @TestLogging("_root:DEBUG") public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception { internalCluster().ensureAtLeastNumDataNodes(between(2, 3)); final int numberOfReplicas = internalCluster().numDataNodes() - 1; @@ -275,7 +279,7 @@ public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception { for (int i = 0; i < extraDocs; i++) { indexDoc(IndexShardTestCase.getEngine(outOfSyncReplica), "extra_" + i); } - final ShardsSyncedFlushResult partialResult = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); + final ShardsSyncedFlushResult partialResult = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); logger.info("Partial seal: {}", syncedFlushDescription(partialResult)); assertThat(partialResult.totalShards(), equalTo(numberOfReplicas + 1)); assertThat(partialResult.successfulShards(), equalTo(numberOfReplicas)); @@ -287,7 +291,7 @@ public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception { indexDoc(IndexShardTestCase.getEngine(indexShard), "extra_" + i); } } - final ShardsSyncedFlushResult fullResult = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); + final ShardsSyncedFlushResult fullResult = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); assertThat(fullResult.totalShards(), equalTo(numberOfReplicas + 1)); 
assertThat(fullResult.successfulShards(), equalTo(numberOfReplicas + 1)); } @@ -308,11 +312,11 @@ public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception { for (int i = 0; i < numDocs; i++) { index("test", "doc", Integer.toString(i)); } - final ShardsSyncedFlushResult firstSeal = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); + final ShardsSyncedFlushResult firstSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); logger.info("First seal: {}", syncedFlushDescription(firstSeal)); assertThat(firstSeal.successfulShards(), equalTo(numberOfReplicas + 1)); // Do not renew synced-flush - final ShardsSyncedFlushResult secondSeal = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); + final ShardsSyncedFlushResult secondSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); logger.info("Second seal: {}", syncedFlushDescription(secondSeal)); assertThat(secondSeal.successfulShards(), equalTo(numberOfReplicas + 1)); assertThat(secondSeal.syncId(), equalTo(firstSeal.syncId())); @@ -321,7 +325,7 @@ public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception { for (int i = 0; i < moreDocs; i++) { index("test", "doc", Integer.toString(i)); } - final ShardsSyncedFlushResult thirdSeal = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); + final ShardsSyncedFlushResult thirdSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); logger.info("Third seal: {}", syncedFlushDescription(thirdSeal)); assertThat(thirdSeal.successfulShards(), equalTo(numberOfReplicas + 1)); assertThat(thirdSeal.syncId(), not(equalTo(firstSeal.syncId()))); @@ -337,7 +341,7 @@ public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception { shard.flush(new FlushRequest(shardId.getIndexName()).force(true).waitIfOngoing(true)); assertThat(shard.commitStats().syncId(), nullValue()); } - final ShardsSyncedFlushResult forthSeal = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); + final ShardsSyncedFlushResult forthSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); logger.info("Forth seal: {}", syncedFlushDescription(forthSeal)); assertThat(forthSeal.successfulShards(), equalTo(numberOfReplicas + 1)); assertThat(forthSeal.syncId(), not(equalTo(thirdSeal.syncId()))); diff --git a/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java b/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java index 6532ac2ab1b88..987f69b65878a 100644 --- a/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java +++ b/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java @@ -18,11 +18,11 @@ */ package org.elasticsearch.indices.flush; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.InternalTestCluster; @@ -40,9 +40,11 @@ private SyncedFlushUtil() { /** * Blocking version of {@link SyncedFlushService#attemptSyncedFlush(ShardId, ActionListener)} */ - public static ShardsSyncedFlushResult attemptSyncedFlush(InternalTestCluster cluster, ShardId shardId) { + public static ShardsSyncedFlushResult attemptSyncedFlush(Logger logger, InternalTestCluster cluster, ShardId shardId) { SyncedFlushService service = 
cluster.getInstance(SyncedFlushService.class); - LatchedListener listener = new LatchedListener(); + logger.debug("Issue synced-flush on node [{}], shard [{}], cluster state [{}]", + service.nodeName(), shardId, cluster.clusterService(service.nodeName()).state()); + LatchedListener listener = new LatchedListener<>(); service.attemptSyncedFlush(shardId, listener); try { listener.latch.await(); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java index 91b35594772cf..3b50fa649150c 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -52,7 +52,7 @@ public void testGetStartingSeqNo() throws Exception { final long initDocs = scaledRandomIntBetween(1, 10); { for (int i = 0; i < initDocs; i++) { - indexDoc(replica, "doc", Integer.toString(i)); + indexDoc(replica, "_doc", Integer.toString(i)); if (randomBoolean()) { flushShard(replica); } @@ -68,7 +68,7 @@ public void testGetStartingSeqNo() throws Exception { final int moreDocs = randomIntBetween(1, 10); { for (int i = 0; i < moreDocs; i++) { - indexDoc(replica, "doc", Long.toString(i)); + indexDoc(replica, "_doc", Long.toString(i)); if (randomBoolean()) { flushShard(replica); } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java index 4e9d0ccb22e11..537409f35d175 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -122,23 +122,23 @@ public void testRecoveryWithOutOfOrderDelete() throws Exception { final String indexName = orgReplica.shardId().getIndexName(); // delete #1 - orgReplica.applyDeleteOperationOnReplica(1, 2, "type", "id", VersionType.EXTERNAL, u -> {}); + orgReplica.applyDeleteOperationOnReplica(1, 2, "type", "id", VersionType.EXTERNAL); getTranslog(orgReplica).rollGeneration(); // isolate the delete in it's own generation // index #0 orgReplica.applyIndexOperationOnReplica(0, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, - SourceToParse.source(indexName, "type", "id", new BytesArray("{}"), XContentType.JSON), u -> {}); + SourceToParse.source(indexName, "type", "id", new BytesArray("{}"), XContentType.JSON)); // index #3 orgReplica.applyIndexOperationOnReplica(3, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, - SourceToParse.source(indexName, "type", "id-3", new BytesArray("{}"), XContentType.JSON), u -> {}); + SourceToParse.source(indexName, "type", "id-3", new BytesArray("{}"), XContentType.JSON)); // Flushing a new commit with local checkpoint=1 allows to delete the translog gen #1. orgReplica.flush(new FlushRequest().force(true).waitIfOngoing(true)); // index #2 orgReplica.applyIndexOperationOnReplica(2, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, - SourceToParse.source(indexName, "type", "id-2", new BytesArray("{}"), XContentType.JSON), u -> {}); + SourceToParse.source(indexName, "type", "id-2", new BytesArray("{}"), XContentType.JSON)); orgReplica.updateGlobalCheckpointOnReplica(3L, "test"); // index #5 -> force NoOp #4. 
orgReplica.applyIndexOperationOnReplica(5, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, - SourceToParse.source(indexName, "type", "id-5", new BytesArray("{}"), XContentType.JSON), u -> {}); + SourceToParse.source(indexName, "type", "id-5", new BytesArray("{}"), XContentType.JSON)); final int translogOps; if (randomBoolean()) { @@ -247,9 +247,11 @@ public void testPeerRecoverySendSafeCommitInFileBased() throws Exception { int numDocs = between(1, 100); long globalCheckpoint = 0; for (int i = 0; i < numDocs; i++) { - primaryShard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, - SourceToParse.source(primaryShard.shardId().getIndexName(), "test", Integer.toString(i), new BytesArray("{}"), - XContentType.JSON), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, getMappingUpdater(primaryShard, "test")); + Engine.IndexResult result = primaryShard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, + SourceToParse.source(primaryShard.shardId().getIndexName(), "_doc", Integer.toString(i), new BytesArray("{}"), + XContentType.JSON), + IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false); + assertThat(result.getResultType(), equalTo(Engine.Result.Type.SUCCESS)); if (randomBoolean()) { globalCheckpoint = randomLongBetween(globalCheckpoint, i); primaryShard.updateLocalCheckpointForShard(primaryShard.routingEntry().allocationId().getId(), globalCheckpoint); diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java index 8c1e242b3262f..db8aa615c1440 100644 --- a/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java @@ -19,11 +19,14 @@ package org.elasticsearch.repositories; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotState; @@ -39,7 +42,11 @@ import java.util.Map; import java.util.Set; +import static java.util.Collections.emptySet; +import static java.util.Collections.singleton; import static org.elasticsearch.repositories.RepositoryData.EMPTY_REPO_GEN; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; /** @@ -101,15 +108,18 @@ public void testAddSnapshots() { public void testInitIndices() { final int numSnapshots = randomIntBetween(1, 30); final Map snapshotIds = new HashMap<>(numSnapshots); + final Map snapshotStates = new HashMap<>(numSnapshots); for (int i = 0; i < numSnapshots; i++) { final SnapshotId snapshotId = new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID()); snapshotIds.put(snapshotId.getUUID(), snapshotId); + snapshotStates.put(snapshotId.getUUID(), randomFrom(SnapshotState.values())); } RepositoryData repositoryData = new RepositoryData(EMPTY_REPO_GEN, snapshotIds, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyList()); // test that initializing indices works Map> 
indices = randomIndices(snapshotIds); - RepositoryData newRepoData = repositoryData.initIndices(indices); + RepositoryData newRepoData = new RepositoryData(repositoryData.getGenId(), snapshotIds, snapshotStates, indices, + new ArrayList<>(repositoryData.getIncompatibleSnapshotIds())); List expected = new ArrayList<>(repositoryData.getSnapshotIds()); Collections.sort(expected); List actual = new ArrayList<>(newRepoData.getSnapshotIds()); @@ -153,6 +163,81 @@ public void testGetSnapshotState() { assertNull(repositoryData.getSnapshotState(new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID()))); } + public void testIndexThatReferencesAnUnknownSnapshot() throws IOException { + final XContent xContent = randomFrom(XContentType.values()).xContent(); + final RepositoryData repositoryData = generateRandomRepoData(); + + XContentBuilder builder = XContentBuilder.builder(xContent); + repositoryData.snapshotsToXContent(builder, ToXContent.EMPTY_PARAMS); + RepositoryData parsedRepositoryData = RepositoryData.snapshotsFromXContent(createParser(builder), repositoryData.getGenId()); + assertEquals(repositoryData, parsedRepositoryData); + + Map snapshotIds = new HashMap<>(); + Map snapshotStates = new HashMap<>(); + for (SnapshotId snapshotId : parsedRepositoryData.getSnapshotIds()) { + snapshotIds.put(snapshotId.getUUID(), snapshotId); + snapshotStates.put(snapshotId.getUUID(), parsedRepositoryData.getSnapshotState(snapshotId)); + } + + final IndexId corruptedIndexId = randomFrom(parsedRepositoryData.getIndices().values()); + + Map> indexSnapshots = new HashMap<>(); + for (Map.Entry snapshottedIndex : parsedRepositoryData.getIndices().entrySet()) { + IndexId indexId = snapshottedIndex.getValue(); + Set snapshotsIds = new LinkedHashSet<>(parsedRepositoryData.getSnapshots(indexId)); + if (corruptedIndexId.equals(indexId)) { + snapshotsIds.add(new SnapshotId("_uuid", "_does_not_exist")); + } + indexSnapshots.put(indexId, snapshotsIds); + } + assertNotNull(corruptedIndexId); + + RepositoryData corruptedRepositoryData = new RepositoryData(parsedRepositoryData.getGenId(), snapshotIds, snapshotStates, + indexSnapshots, new ArrayList<>(parsedRepositoryData.getIncompatibleSnapshotIds())); + + final XContentBuilder corruptedBuilder = XContentBuilder.builder(xContent); + corruptedRepositoryData.snapshotsToXContent(corruptedBuilder, ToXContent.EMPTY_PARAMS); + + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> + RepositoryData.snapshotsFromXContent(createParser(corruptedBuilder), corruptedRepositoryData.getGenId())); + assertThat(e.getMessage(), equalTo("Detected a corrupted repository, index " + corruptedIndexId + " references an unknown " + + "snapshot uuid [_does_not_exist]")); + } + + public void testIndexThatReferenceANullSnapshot() throws IOException { + final XContentBuilder builder = XContentBuilder.builder(randomFrom(XContentType.JSON).xContent()); + builder.startObject(); + { + builder.startArray("snapshots"); + builder.value(new SnapshotId("_name", "_uuid")); + builder.endArray(); + + builder.startObject("indices"); + { + builder.startObject("docs"); + { + builder.field("id", "_id"); + builder.startArray("snapshots"); + { + builder.startObject(); + if (randomBoolean()) { + builder.field("name", "_name"); + } + builder.endObject(); + } + builder.endArray(); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> + 
RepositoryData.snapshotsFromXContent(createParser(builder), randomNonNegativeLong())); + assertThat(e.getMessage(), equalTo("Detected a corrupted repository, index [docs/_id] references an unknown snapshot uuid [null]")); + } + public static RepositoryData generateRandomRepoData() { final int numIndices = randomIntBetween(1, 30); final List indices = new ArrayList<>(numIndices); diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java index 20e8a9693c5a2..fd6e8e3fdf32d 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java @@ -20,13 +20,13 @@ package org.elasticsearch.repositories.blobstore; import org.apache.lucene.store.Directory; -import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.TestUtil; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingHelper; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.index.engine.InternalEngineFactory; @@ -73,7 +73,7 @@ public void testRestoreSnapshotWithExistingFiles() throws IOException { final int numDocs = scaledRandomIntBetween(1, 500); recoverShardFromStore(shard); for (int i = 0; i < numDocs; i++) { - indexDoc(shard, "doc", Integer.toString(i)); + indexDoc(shard, "_doc", Integer.toString(i)); if (rarely()) { flushShard(shard, false); } @@ -140,7 +140,7 @@ public void testSnapshotWithConflictingName() throws IOException { final int numDocs = scaledRandomIntBetween(1, 500); recoverShardFromStore(shard); for (int i = 0; i < numDocs; i++) { - indexDoc(shard, "doc", Integer.toString(i)); + indexDoc(shard, "_doc", Integer.toString(i)); if (rarely()) { flushShard(shard, false); } diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandlerTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandlerTests.java new file mode 100644 index 0000000000000..75071309458cc --- /dev/null +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandlerTests.java @@ -0,0 +1,62 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.rest.action.admin.indices; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; + +import java.io.IOException; +import java.util.Collections; + +import static org.mockito.Mockito.mock; + +public class RestResizeHandlerTests extends ESTestCase { + + public void testShrinkCopySettingsDeprecated() throws IOException { + final RestResizeHandler.RestShrinkIndexAction handler = + new RestResizeHandler.RestShrinkIndexAction(Settings.EMPTY, mock(RestController.class)); + final String copySettings = randomFrom("true", "false"); + final FakeRestRequest request = + new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY) + .withParams(Collections.singletonMap("copy_settings", copySettings)) + .withPath("source/_shrink/target") + .build(); + handler.prepareRequest(request, mock(NodeClient.class)); + assertWarnings("parameter [copy_settings] is deprecated but was [" + copySettings + "]"); + } + + public void testSplitCopySettingsDeprecated() throws IOException { + final RestResizeHandler.RestSplitIndexAction handler = + new RestResizeHandler.RestSplitIndexAction(Settings.EMPTY, mock(RestController.class)); + final String copySettings = randomFrom("true", "false"); + final FakeRestRequest request = + new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY) + .withParams(Collections.singletonMap("copy_settings", copySettings)) + .withPath("source/_split/target") + .build(); + handler.prepareRequest(request, mock(NodeClient.class)); + assertWarnings("parameter [copy_settings] is deprecated but was [" + copySettings + "]"); + } + +} diff --git a/server/src/test/java/org/elasticsearch/snapshots/RepositoriesMetaDataSerializationTests.java b/server/src/test/java/org/elasticsearch/snapshots/RepositoriesMetaDataSerializationTests.java index dc0c7b55c6d22..7627dafa5a910 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/RepositoriesMetaDataSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/RepositoriesMetaDataSerializationTests.java @@ -45,7 +45,7 @@ protected Custom createTestInstance() { entries.add(new RepositoryMetaData(randomAlphaOfLength(10), randomAlphaOfLength(10), randomSettings())); } entries.sort(Comparator.comparing(RepositoryMetaData::name)); - return new RepositoriesMetaData(entries.toArray(new RepositoryMetaData[entries.size()])); + return new RepositoriesMetaData(entries); } @Override @@ -62,7 +62,7 @@ protected Custom mutateInstance(Custom instance) { } else { entries.remove(randomIntBetween(0, entries.size() - 1)); } - return new RepositoriesMetaData(entries.toArray(new RepositoryMetaData[entries.size()])); + return new RepositoriesMetaData(entries); } public Settings randomSettings() { @@ -94,7 +94,7 @@ protected Custom makeTestChanges(Custom testInstance) { repos.add(new RepositoryMetaData(randomAlphaOfLength(10), randomAlphaOfLength(10), randomSettings())); } } - return new RepositoriesMetaData(repos.toArray(new RepositoryMetaData[repos.size()])); + return new RepositoriesMetaData(repos); } @Override @@ -114,7 +114,7 @@ protected Custom doParseInstance(XContentParser parser) throws IOException { assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); List repos = repositoriesMetaData.repositories(); 
repos.sort(Comparator.comparing(RepositoryMetaData::name)); - return new RepositoriesMetaData(repos.toArray(new RepositoryMetaData[repos.size()])); + return new RepositoriesMetaData(repos); } } diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 9cc44e4ae05c1..c6cb6a856812d 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -85,6 +85,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.script.MockScriptEngine; @@ -92,6 +93,7 @@ import org.elasticsearch.snapshots.mockstore.MockRepository; import org.elasticsearch.test.junit.annotations.TestLogging; +import java.io.IOException; import java.nio.channels.SeekableByteChannel; import java.nio.file.Files; import java.nio.file.Path; @@ -1241,30 +1243,44 @@ public void testDeleteSnapshotWithMissingIndexAndShardMetadata() throws Exceptio .put("compress", false) .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); - createIndex("test-idx-1", "test-idx-2"); + final String[] indices = {"test-idx-1", "test-idx-2"}; + createIndex(indices); logger.info("--> indexing some data"); indexRandom(true, client().prepareIndex("test-idx-1", "_doc").setSource("foo", "bar"), client().prepareIndex("test-idx-2", "_doc").setSource("foo", "bar")); logger.info("--> creating snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).setIndices("test-idx-*").get(); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); + CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1") + .setWaitForCompletion(true).setIndices(indices).get(); + final SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo(); + assertThat(snapshotInfo.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); + + RepositoriesService service = internalCluster().getInstance(RepositoriesService.class, internalCluster().getMasterName()); + Repository repository = service.repository("test-repo"); + + final Map indexIds = repository.getRepositoryData().getIndices(); + final Path indicesPath = repo.resolve("indices"); logger.info("--> delete index metadata and shard metadata"); - Path indices = repo.resolve("indices"); - Path testIndex1 = indices.resolve("test-idx-1"); - Path testIndex2 = indices.resolve("test-idx-2"); - Path testIndex2Shard0 = testIndex2.resolve("0"); - IOUtils.deleteFilesIgnoringExceptions(testIndex1.resolve("snapshot-test-snap-1")); - IOUtils.deleteFilesIgnoringExceptions(testIndex2Shard0.resolve("snapshot-test-snap-1")); + for (String index : indices) { + Path shardZero = indicesPath.resolve(indexIds.get(index).getId()).resolve("0"); + if (randomBoolean()) { + 
Files.delete(shardZero.resolve("index-0")); + } + Files.delete(shardZero.resolve("snap-" + snapshotInfo.snapshotId().getUUID() + ".dat")); + } logger.info("--> delete snapshot"); client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap-1").get(); logger.info("--> make sure snapshot doesn't exist"); assertThrows(client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1"), SnapshotMissingException.class); + + for (String index : indices) { + assertTrue(Files.notExists(indicesPath.resolve(indexIds.get(index).getId()))); + } } public void testDeleteSnapshotWithMissingMetadata() throws Exception { @@ -1357,9 +1373,13 @@ public void testSnapshotWithMissingShardLevelIndexFile() throws Exception { logger.info("--> deleting shard level index file"); try (Stream files = Files.list(repo.resolve("indices"))) { - files.forEach(indexPath -> - IOUtils.deleteFilesIgnoringExceptions(indexPath.resolve("0").resolve("index-0")) - ); + files.forEach(indexPath -> { + try { + Files.delete(indexPath.resolve("0").resolve("index-0")); + } catch (IOException e) { + throw new RuntimeException("Failed to delete expected file", e); + } + }); } logger.info("--> creating another snapshot"); diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 2658dad373315..d06605c140c45 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -25,10 +25,10 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.replication.TransportReplicationAction; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; @@ -59,7 +59,6 @@ import org.elasticsearch.index.engine.InternalEngineFactory; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.seqno.ReplicationTracker; @@ -96,7 +95,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.function.BiFunction; -import java.util.function.Consumer; import static org.elasticsearch.cluster.routing.TestShardRouting.newShardRouting; import static org.hamcrest.Matchers.contains; @@ -186,7 +184,8 @@ protected IndexShard newShard( .build(); IndexMetaData.Builder metaData = IndexMetaData.builder(shardRouting.getIndexName()) .settings(settings) - .primaryTerm(0, primaryTerm); + .primaryTerm(0, primaryTerm) + .putMapping("_doc", "{ \"properties\": {} }"); return newShard(shardRouting, metaData.build(), listeners); } @@ -562,27 +561,27 @@ protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id, SourceToParse sourceToParse = SourceToParse.source(shard.shardId().getIndexName(), type, id, new BytesArray(source), xContentType); sourceToParse.routing(routing); sourceToParse.parent(parentId); + 
Engine.IndexResult result; if (shard.routingEntry().primary()) { - final Engine.IndexResult result = shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, sourceToParse, - IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, getMappingUpdater(shard, type)); + result = shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, sourceToParse, + IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false); + if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) { + updateMappings(shard, IndexMetaData.builder(shard.indexSettings().getIndexMetaData()) + .putMapping(type, result.getRequiredMappingUpdate().toString()).build()); + result = shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, sourceToParse, + IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false); + } shard.updateLocalCheckpointForShard(shard.routingEntry().allocationId().getId(), shard.getEngine().getLocalCheckpointTracker().getCheckpoint()); - return result; } else { - return shard.applyIndexOperationOnReplica(shard.seqNoStats().getMaxSeqNo() + 1, 0, - VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse, getMappingUpdater(shard, type)); - } - } - - protected Consumer getMappingUpdater(IndexShard shard, String type) { - return update -> { - try { - updateMappings(shard, IndexMetaData.builder(shard.indexSettings().getIndexMetaData()) - .putMapping(type, update.toString()).build()); - } catch (IOException e) { - ExceptionsHelper.reThrowIfNotNull(e); + result = shard.applyIndexOperationOnReplica(shard.seqNoStats().getMaxSeqNo() + 1, 0, + VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse); + if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) { + throw new TransportReplicationAction.RetryOnReplicaException(shard.shardId, + "Mappings are not available on the replica yet, triggered update: " + result.getRequiredMappingUpdate()); } - }; + } + return result; } protected void updateMappings(IndexShard shard, IndexMetaData indexMetadata) { @@ -592,10 +591,9 @@ protected void updateMappings(IndexShard shard, IndexMetaData indexMetadata) { protected Engine.DeleteResult deleteDoc(IndexShard shard, String type, String id) throws IOException { if (shard.routingEntry().primary()) { - return shard.applyDeleteOperationOnPrimary(Versions.MATCH_ANY, type, id, VersionType.INTERNAL, update -> {}); + return shard.applyDeleteOperationOnPrimary(Versions.MATCH_ANY, type, id, VersionType.INTERNAL); } else { - return shard.applyDeleteOperationOnReplica(shard.seqNoStats().getMaxSeqNo() + 1, - 0L, type, id, VersionType.EXTERNAL, update -> {}); + return shard.applyDeleteOperationOnReplica(shard.seqNoStats().getMaxSeqNo() + 1, 0L, type, id, VersionType.EXTERNAL); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 2d027e8bfece5..a7fd6768064e9 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -1352,7 +1352,7 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, IndexReque * segment or if only one document is in a segment etc. This method prevents issues like this by randomizing the index * layout. * - * @param forceRefresh if true all involved indices are refreshed once the documents are indexed. 
Additionally if true + * @param forceRefresh if {@code true} all involved indices are refreshed once the documents are indexed. Additionally if {@code true} * some empty dummy documents are may be randomly inserted into the document list and deleted once all documents are indexed. * This is useful to produce deleted documents on the server side. * @param builders the documents to index. @@ -1369,8 +1369,8 @@ public void indexRandom(boolean forceRefresh, List builders * segment or if only one document is in a segment etc. This method prevents issues like this by randomizing the index * layout. * - * @param forceRefresh if true all involved indices are refreshed once the documents are indexed. - * @param dummyDocuments if true some empty dummy documents may be randomly inserted into the document list and deleted once + * @param forceRefresh if {@code true} all involved indices are refreshed once the documents are indexed. + * @param dummyDocuments if {@code true} some empty dummy documents may be randomly inserted into the document list and deleted once * all documents are indexed. This is useful to produce deleted documents on the server side. * @param builders the documents to index. */ @@ -1385,10 +1385,10 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, Listtrue all involved indices are refreshed once the documents are indexed. - * @param dummyDocuments if true some empty dummy documents may be randomly inserted into the document list and deleted once + * @param forceRefresh if {@code true} all involved indices are refreshed once the documents are indexed. + * @param dummyDocuments if {@code true} some empty dummy documents may be randomly inserted into the document list and deleted once * all documents are indexed. This is useful to produce deleted documents on the server side. - * @param maybeFlush if true this method may randomly execute full flushes after index operations. + * @param maybeFlush if {@code true} this method may randomly execute full flushes after index operations. * @param builders the documents to index. */ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean maybeFlush, List builders) throws InterruptedException, ExecutionException { @@ -1554,27 +1554,27 @@ public enum Scope { Scope scope() default Scope.SUITE; /** - * Returns the number of nodes in the cluster. Default is -1 which means + * Returns the number of nodes in the cluster. Default is {@code -1} which means * a random number of nodes is used, where the minimum and maximum number of nodes * are either the specified ones or the default ones if not specified. */ int numDataNodes() default -1; /** - * Returns the minimum number of data nodes in the cluster. Default is -1. + * Returns the minimum number of data nodes in the cluster. Default is {@code -1}. * Ignored when {@link ClusterScope#numDataNodes()} is set. */ int minNumDataNodes() default -1; /** - * Returns the maximum number of data nodes in the cluster. Default is -1. + * Returns the maximum number of data nodes in the cluster. Default is {@code -1}. * Ignored when {@link ClusterScope#numDataNodes()} is set. */ int maxNumDataNodes() default -1; /** - * Indicates whether the cluster can have dedicated master nodes. If false means data nodes will serve as master nodes - * and there will be no dedicated master (and data) nodes. Default is true which means + * Indicates whether the cluster can have dedicated master nodes. 
If {@code false} means data nodes will serve as master nodes + * and there will be no dedicated master (and data) nodes. Default is {@code false} which means * dedicated master nodes will be randomly used. */ boolean supportsDedicatedMasters() default true; @@ -1703,7 +1703,7 @@ private int getNumClientNodes() { } /** - * This method is used to obtain settings for the Nth node in the cluster. + * This method is used to obtain settings for the {@code N}th node in the cluster. * Nodes in this cluster are associated with an ordinal number such that nodes can * be started with specific configurations. This method might be called multiple * times with the same ordinal and is expected to return the same value for each invocation. @@ -1878,7 +1878,7 @@ public Collection> transportClientPlugins() { /** * Iff this returns true mock transport implementations are used for the test runs. Otherwise not mock transport impls are used. - * The default is true + * The default is {@code true}. */ protected boolean addMockTransportService() { return true; @@ -1886,7 +1886,7 @@ protected boolean addMockTransportService() { /** * Iff this returns true test zen discovery implementations is used for the test runs. - * The default is true + * The default is {@code true}. */ protected boolean addTestZenDiscovery() { return true; @@ -1957,7 +1957,7 @@ private static double transportClientRatio() { /** * Returns the transport client ratio from the class level annotation or via * {@link System#getProperty(String)} if available. If both are not available this will - * return a random ratio in the interval [0..1] + * return a random ratio in the interval {@code [0..1]}. */ protected double getPerTestTransportClientRatio() { final ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java index d127f1a6b3631..cd1aa6b020d35 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java @@ -19,7 +19,6 @@ package org.elasticsearch.test; import com.carrotsearch.randomizedtesting.RandomizedContext; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; @@ -38,6 +37,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; @@ -252,7 +252,7 @@ protected IndexService createIndex(String index, Settings settings, String type, */ protected IndexService createIndex(String index, Settings settings, String type, Object... 
mappings) { CreateIndexRequestBuilder createIndexRequestBuilder = client().admin().indices().prepareCreate(index).setSettings(settings); - if (type != null && mappings != null) { + if (type != null) { createIndexRequestBuilder.addMapping(type, mappings); } return createIndex(index, createIndexRequestBuilder); diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 12acd21903ec4..5099fc0540de2 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -1978,7 +1978,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { } /** - * Executed for each node before the n+1 node is restarted. The given client is + * Executed for each node before the {@code n + 1} node is restarted. The given client is * an active client to the node that will be restarted next. */ public void doAfterNodes(int n, Client client) throws Exception { diff --git a/test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java b/test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java index 15c650173bf87..724a99f2c9425 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java @@ -145,7 +145,7 @@ private static String differenceBetweenObjectsIgnoringArrayOrder(String path, Ob * If the xContent output contains objects that should be skipped of such treatment, an optional filtering * {@link Predicate} can be supplied that checks xContent paths that should be excluded from this treatment. * - * This predicate should check the xContent path that we want to insert to and return true if the + * This predicate should check the xContent path that we want to insert to and return {@code true} if the * path should be excluded. Paths are string concatenating field names and array indices, so e.g. in: * *

diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/BlockMasterServiceOnMaster.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/BlockMasterServiceOnMaster.java
new file mode 100644
index 0000000000000..0547ce70f2f91
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/BlockMasterServiceOnMaster.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.disruption;
+
+import org.apache.logging.log4j.core.util.Throwables;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateUpdateTask;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.InternalTestCluster;
+
+import java.util.Random;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicReference;
+
+public class BlockMasterServiceOnMaster extends SingleNodeDisruption {
+
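+    // Latch handed from startDisrupting() to the blocking cluster state task;
+    // the compare-and-set from null guards against overlapping disruption cycles.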
+    AtomicReference<CountDownLatch> disruptionLatch = new AtomicReference<>();
+
+
+    public BlockMasterServiceOnMaster(Random random) {
+        super(random);
+    }
+
+
+    @Override
+    public void startDisrupting() {
+        disruptedNode = cluster.getMasterName();
+        final String disruptionNodeCopy = disruptedNode;
+        if (disruptionNodeCopy == null) {
+            return;
+        }
+        ClusterService clusterService = cluster.getInstance(ClusterService.class, disruptionNodeCopy);
+        if (clusterService == null) {
+            return;
+        }
+        logger.info("blocking master service on node [{}]", disruptionNodeCopy);
+        boolean success = disruptionLatch.compareAndSet(null, new CountDownLatch(1));
+        assert success : "startDisrupting called without waiting on stopDisrupting to complete";
+        final CountDownLatch started = new CountDownLatch(1);
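+        // Submit a highest-priority update task whose execute() parks the master
+        // service thread on the disruption latch until stopDisrupting() is called.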
+        clusterService.getMasterService().submitStateUpdateTask("service_disruption_block", new ClusterStateUpdateTask() {
+            @Override
+            public Priority priority() {
+                return Priority.IMMEDIATE;
+            }
+
+            @Override
+            public ClusterState execute(ClusterState currentState) throws Exception {
+                started.countDown();
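+                // block cluster state processing until stopDisrupting() releases the latch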
+                CountDownLatch latch = disruptionLatch.get();
+                if (latch != null) {
+                    try {
+                        latch.await();
+                    } catch (InterruptedException e) {
+                        Throwables.rethrow(e);
+                    }
+                }
+                return currentState;
+            }
+
+            @Override
+            public void onFailure(String source, Exception e) {
+                logger.error("unexpected error during disruption", e);
+            }
+        });
+        try {
+            started.await();
+        } catch (InterruptedException e) {
+            // restore the interrupt status rather than silently swallowing it
+            Thread.currentThread().interrupt();
+        }
+    }
+
+    @Override
+    public void stopDisrupting() {
+        CountDownLatch latch = disruptionLatch.get();
+        if (latch != null) {
+            latch.countDown();
+        }
+    }
+
+    @Override
+    public void removeAndEnsureHealthy(InternalTestCluster cluster) {
+        removeFromCluster(cluster);
+    }
+
+    @Override
+    public TimeValue expectedTimeToHeal() {
+        return TimeValue.timeValueMinutes(0);
+    }
+}
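For context, a minimal usage sketch of the new disruption, assuming the usual
`ESIntegTestCase`/`InternalTestCluster` plumbing; the test class, its body, and
the assertions below are hypothetical and not part of this change:

[source,java]
----
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.disruption.BlockMasterServiceOnMaster;

public class BlockedMasterIT extends ESIntegTestCase {

    public void testClusterKeepsServingWhileMasterIsBlocked() throws Exception {
        BlockMasterServiceOnMaster disruption = new BlockMasterServiceOnMaster(random());
        internalCluster().setDisruptionScheme(disruption);
        disruption.startDisrupting();    // parks the master service on the latch
        try {
            // exercise behavior that must not depend on new cluster state updates,
            // e.g. searches against already-allocated shards
        } finally {
            disruption.stopDisrupting(); // releases the latch
            internalCluster().clearDisruptionScheme(true);
        }
        ensureGreen();
    }
}
----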
diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java
index bf0b7376b8148..fc2a85b35a95b 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java
@@ -19,14 +19,13 @@
 package org.elasticsearch.test.engine;
 
 import org.apache.logging.log4j.Logger;
+import org.apache.lucene.index.AssertingDirectoryReader;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.FilterDirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.search.AssertingIndexSearcher;
-import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.QueryCache;
 import org.apache.lucene.search.QueryCachingPolicy;
-import org.apache.lucene.search.ReferenceManager;
 import org.apache.lucene.util.LuceneTestCase;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.logging.Loggers;
@@ -38,6 +37,7 @@
 import org.elasticsearch.index.engine.EngineException;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.engine.MockInternalEngine;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -47,14 +47,15 @@
 import java.util.concurrent.atomic.AtomicBoolean;
 
 /**
- * Support class to build MockEngines like {@link org.elasticsearch.test.engine.MockInternalEngine}
+ * Support class to build MockEngines like {@link MockInternalEngine}
  * since they need to subclass the actual engine
  */
 public final class MockEngineSupport {
 
     /**
-     * Allows tests to wrap an index reader randomly with a given ratio. This is disabled by default ie. 0.0d since reader wrapping is insanely
-     * slow if {@link org.apache.lucene.index.AssertingDirectoryReader} is used.
+     * Allows tests to wrap an index reader randomly with a given ratio. This
+     * is disabled by default, i.e. {@code 0.0d}, since reader wrapping is insanely
+     * slow if {@link AssertingDirectoryReader} is used.
      */
     public static final Setting<Double> WRAP_READER_RATIO =
         Setting.doubleSetting("index.engine.mock.random.wrap_reader_ratio", 0.0d, 0.0d, Property.IndexScope);
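As a hedged illustration of opting into the (deliberately slow) reader wrapping
described above, a test could raise the ratio for an index; the class name and
the 0.5d ratio here are made up:

[source,java]
----
import org.elasticsearch.common.settings.Settings;

class WrapReaderRatioSketch {
    // Index settings that wrap roughly half of the acquired searchers with
    // AssertingDirectoryReader; the default of 0.0d keeps wrapping disabled.
    static Settings wrapHalfTheReaders() {
        return Settings.builder()
                .put("index.engine.mock.random.wrap_reader_ratio", 0.5d)
                .build();
    }
}
----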
diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java
index 1c31533c9337d..6654444066d52 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java
@@ -474,7 +474,7 @@ public void clearRule() {
     /**
      * Adds a new delegate transport that is used for communication with the given transport service.
      *
-     * @return true iff no other delegate was registered for any of the addresses bound by transport service.
+     * @return {@code true} iff no other delegate was registered for any of the addresses bound by transport service.
      */
     public boolean addDelegate(TransportService transportService, DelegateTransport transport) {
         boolean noRegistered = true;
@@ -487,7 +487,7 @@ public boolean addDelegate(TransportService transportService, DelegateTransport
     /**
      * Adds a new delegate transport that is used for communication with the given transport address.
      *
-     * @return true iff no other delegate was registered for this address before.
+     * @return {@code true} iff no other delegate was registered for this address before.
      */
     public boolean addDelegate(TransportAddress transportAddress, DelegateTransport transport) {
         return transport().transports.put(transportAddress, transport) == null;
diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle
index 9f231429cbcc2..f1917bcaaa11f 100644
--- a/x-pack/docs/build.gradle
+++ b/x-pack/docs/build.gradle
@@ -61,7 +61,6 @@ buildRestTests.expectedUnconvertedCandidates = [
         'en/watcher/trigger/schedule/yearly.asciidoc',
         'en/watcher/troubleshooting.asciidoc',
         'en/rest-api/license/delete-license.asciidoc',
-        'en/rest-api/license/start-trial.asciidoc',
         'en/rest-api/license/update-license.asciidoc',
         'en/ml/api-quickref.asciidoc',
         'en/rest-api/ml/delete-calendar-event.asciidoc',
diff --git a/x-pack/docs/en/release-notes/6.0.0-alpha1.asciidoc b/x-pack/docs/en/release-notes/6.0.0-alpha1.asciidoc
index fc06368b12a98..4bad1268c8716 100644
--- a/x-pack/docs/en/release-notes/6.0.0-alpha1.asciidoc
+++ b/x-pack/docs/en/release-notes/6.0.0-alpha1.asciidoc
@@ -34,6 +34,5 @@ Watcher::
 
 See also:
 
-* <>
 * {kibana-ref}/release-notes-xkb.html[{kib} {xpack} Release Notes]
 * {logstash-ref}/release-notes-xls.html[Logstash {xpack} Release Notes]
diff --git a/x-pack/docs/en/release-notes/6.0.0-alpha2.asciidoc b/x-pack/docs/en/release-notes/6.0.0-alpha2.asciidoc
index e3b011dead255..456b1cb9f4aaf 100644
--- a/x-pack/docs/en/release-notes/6.0.0-alpha2.asciidoc
+++ b/x-pack/docs/en/release-notes/6.0.0-alpha2.asciidoc
@@ -39,6 +39,5 @@ Metadata fails, the update is now retried.
 
 See also:
 
-* <>
 * {kibana-ref}/release-notes-xkb.html[{kib} {xpack} Release Notes]
 * {logstash-ref}/release-notes-xls.html[Logstash {xpack} Release Notes]
diff --git a/x-pack/docs/en/release-notes/6.0.0-beta1.asciidoc b/x-pack/docs/en/release-notes/6.0.0-beta1.asciidoc
index 0dbf145c3d5d0..f3f1c76c19f77 100644
--- a/x-pack/docs/en/release-notes/6.0.0-beta1.asciidoc
+++ b/x-pack/docs/en/release-notes/6.0.0-beta1.asciidoc
@@ -29,6 +29,5 @@ to `false` in your `elasticsearch.yml`. See
 
 See also:
 
-* <>
 * {kibana-ref}/release-notes-xkb.html[{kib} {xpack} Release Notes]
 * {logstash-ref}/release-notes-xls.html[Logstash {xpack} Release Notes]
diff --git a/x-pack/docs/en/release-notes/6.0.0-beta2.asciidoc b/x-pack/docs/en/release-notes/6.0.0-beta2.asciidoc
index 243b2c7349838..5f20a5f06956f 100644
--- a/x-pack/docs/en/release-notes/6.0.0-beta2.asciidoc
+++ b/x-pack/docs/en/release-notes/6.0.0-beta2.asciidoc
@@ -79,6 +79,5 @@ Watcher::
 
 See also:
 
-* <>
 * {kibana-ref}/release-notes-xkb.html[{kib} {xpack} Release Notes]
 * {logstash-ref}/release-notes-xls.html[Logstash {xpack} Release Notes]
diff --git a/x-pack/docs/en/release-notes/6.0.0-rc1.asciidoc b/x-pack/docs/en/release-notes/6.0.0-rc1.asciidoc
index 4d131e2c7871b..e08105a878a8b 100644
--- a/x-pack/docs/en/release-notes/6.0.0-rc1.asciidoc
+++ b/x-pack/docs/en/release-notes/6.0.0-rc1.asciidoc
@@ -72,6 +72,5 @@ Watcher::
 
 See also:
 
-* <>
 * {kibana-ref}/release-notes-xkb.html[{kib} {xpack} Release Notes]
 * {logstash-ref}/release-notes-xls.html[Logstash {xpack} Release Notes]
diff --git a/x-pack/docs/en/release-notes/6.0.0-rc2.asciidoc b/x-pack/docs/en/release-notes/6.0.0-rc2.asciidoc
index b034c8ba382a3..7c07362a5d451 100644
--- a/x-pack/docs/en/release-notes/6.0.0-rc2.asciidoc
+++ b/x-pack/docs/en/release-notes/6.0.0-rc2.asciidoc
@@ -69,6 +69,5 @@ to require multiple binds when authenticating in user search mode.
 
 See also:
 
-* <>
 * {kibana-ref}/release-notes-xkb.html[{kib} {xpack} Release Notes]
 * {logstash-ref}/release-notes-xls.html[Logstash {xpack} Release Notes]
diff --git a/x-pack/docs/en/rest-api/license/start-trial.asciidoc b/x-pack/docs/en/rest-api/license/start-trial.asciidoc
index 8ff793455a239..7754f6feef79c 100644
--- a/x-pack/docs/en/rest-api/license/start-trial.asciidoc
+++ b/x-pack/docs/en/rest-api/license/start-trial.asciidoc
@@ -40,7 +40,7 @@ The following example checks whether you are eligible to start a trial:
 
 [source,js]
 ------------------------------------------------------------
-POST _xpack/license/start_trial
+GET _xpack/license/start_trial
 ------------------------------------------------------------
 // CONSOLE
 // TEST[skip:license testing issues]
@@ -49,6 +49,27 @@ Example response:
 [source,js]
 ------------------------------------------------------------
 {
-  "trial_was_started": true
+  "eligible_to_start_trial": true
 }
 ------------------------------------------------------------
+// NOTCONSOLE
+
+The following example starts a 30-day trial license. The `acknowledge`
+parameter is required because you are starting a license that will expire.
+
+[source,js]
+------------------------------------------------------------
+POST _xpack/license/start_trial?acknowledge=true
+------------------------------------------------------------
+// CONSOLE
+// TEST[skip:license testing issues]
+
+Example response:
+[source,js]
+------------------------------------------------------------
+{
+  "trial_was_started": true,
+  "acknowledged": true
+}
+------------------------------------------------------------
+// NOTCONSOLE
\ No newline at end of file
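The two console snippets above map directly onto the low-level REST client; a
hypothetical driver (host, port, and class name are assumptions, error handling
elided):

[source,java]
----
import org.apache.http.HttpHost;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

import java.util.Collections;

public class StartTrialExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // 1. GET checks eligibility: {"eligible_to_start_trial": true|false}
            Response eligibility = client.performRequest("GET", "/_xpack/license/start_trial");
            // 2. POST starts the 30-day trial; acknowledge=true is required
            Response started = client.performRequest("POST", "/_xpack/license/start_trial",
                    Collections.singletonMap("acknowledge", "true"));
        }
    }
}
----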
diff --git a/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc b/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc
index 2aaca6def915a..f0f209d3fa325 100644
--- a/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc
+++ b/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc
@@ -6,14 +6,7 @@ users. To integrate with Active Directory, you configure an `active_directory`
 realm and map Active Directory users and groups to {security} roles in the
 <>.
 
-To protect passwords, communications between Elasticsearch and the Active Directory
-server should be encrypted using SSL/TLS. Clients and nodes that connect via
-SSL/TLS to the Active Directory server need to have the Active Directory server's
-certificate or the server's root CA certificate installed in their keystore or
-truststore. For more information about installing certificates, see
-<>.
-
-==== Configuring an Active Directory Realm
+See {ref}/configuring-ad-realm.html[Configuring an Active Directory Realm].
 
 {security} uses LDAP to communicate with Active Directory, so `active_directory`
 realms are similar to <>. Like LDAP directories,
@@ -39,400 +32,29 @@ Active Directory. Once the user has been found, the Active Directory realm then
 retrieves the user's group memberships from the `tokenGroups` attribute on the
 user's entry in Active Directory.
 
-To configure an `active_directory` realm:
-
-. Add a realm configuration of type `active_directory` to `elasticsearch.yml`
-under the `xpack.security.authc.realms` namespace. At a minimum, you must set the realm
-`type` to `active_directory` and specify the Active Directory `domain_name`. To
-use SSL/TLS for secured communication with the Active Directory server, you must
-also set the `url` attribute and specify the `ldaps` protocol and secure port
-number. If you are configuring multiple realms, you should also explicitly set
-the `order` attribute to control the order in which the realms are consulted
-during authentication. See <>
-for all of the options you can set for an `active_directory` realm.
-+
-NOTE: Binding to Active Directory fails if the domain name is not mapped in DNS.
-      If DNS is not being provided by a Windows DNS server, add a mapping for
-      the domain in the local `/etc/hosts` file.
-+
-For example, the following realm configuration configures {security} to connect
-to `ldaps://example.com:636` to authenticate users through Active Directory.
-+
-[source, yaml]
-------------------------------------------------------------
-xpack:
-  security:
-    authc:
-      realms:
-        active_directory:
-          type: active_directory
-          order: 0 <1>
-          domain_name: ad.example.com
-          url: ldaps://ad.example.com:636 <2>
-------------------------------------------------------------
-<1> The realm order controls the order in which the configured realms are checked
-    when authenticating a user.
-<2> If you don't specify the URL, it defaults to `ldap::389`.
-+
-IMPORTANT: When you configure realms in `elasticsearch.yml`, only the
-realms you specify are used for authentication. If you also want to use the
-`native` or `file` realms, you must include them in the realm chain.
-
-. Restart Elasticsearch.
-
-===== Configuring a Bind User
-By default, all of the LDAP operations are run by the user that {security} is
-authenticating. In some cases, regular users may not be able to access all of the
-necessary items within Active Directory and a _bind user_ is needed. A bind user
-can be configured and will be used to perform all operations other than the LDAP
-bind request, which is required to authenticate the credentials provided by the user.
-
-The use of a bind user enables the <<run-as-privilege, run as feature>> to be
-used with the Active Directory realm and the ability to maintain a set of pooled
-connections to Active Directory. These pooled connection reduce the number of
-resources that must be created and destroyed with every user authentication.
-
-The following example shows the configuration of a bind user through the user of the
-`bind_dn` and `secure_bind_password` settings.
-
-[source, yaml]
-------------------------------------------------------------
-xpack:
-  security:
-    authc:
-      realms:
-        active_directory:
-          type: active_directory
-          order: 0
-          domain_name: ad.example.com
-          url: ldaps://ad.example.com:636
-          bind_dn: es_svc_user@ad.example.com <1>
-------------------------------------------------------------
-<1> This is the user that all Active Directory search requests are executed as.
-    Without a bind user configured, all requests run as the user that is authenticating
-    with Elasticsearch.
-
-The password for the `bind_dn` user should be configured by adding the appropriate
-`secure_bind_password` setting to the {es} keystore.
-For example, the following command adds the password for the example realm above:
-
-[source, shell]
-------------------------------------------------------------
-bin/elasticsearch-keystore add xpack.security.authc.realms.active_directory.secure_bind_password
-------------------------------------------------------------
-
-When a bind user is configured, connection pooling is enabled by default.
-Connection pooling can be disabled using the `user_search.pool.enabled` setting.
-
-===== Multiple Domain Support
-When authenticating users across multiple domains in a forest, there are a few minor
-differences in the configuration and the way that users will authenticate. The `domain_name`
-setting should be set to the forest root domain name. The `url` setting also needs to
-be set as you will need to authenticate against the Global Catalog, which uses a different
-port and may not be running on every Domain Controller.
-
-For example, the following realm configuration configures {security} to connect to specific
-Domain Controllers on the Global Catalog port with the domain name set to the forest root.
-
-[source, yaml]
-------------------------------------------------------------
-xpack:
-  security:
-    authc:
-      realms:
-        active_directory:
-          type: active_directory
-          order: 0
-          domain_name: example.com <1>
-          url: ldaps://dc1.ad.example.com:3269, ldaps://dc2.ad.example.com:3269 <2>
-          load_balance:
-            type: "round_robin" <3>
-------------------------------------------------------------
-<1> The `domain_name` is set to the name of the root domain in the forest.
-<2> The `url` value used in this example has URLs for two different Domain Controllers,
-which are also Global Catalog servers. Port 3268 is the default port for unencrypted
-communication with the Global Catalog; port 3269 is the default port for SSL connections.
-The servers that are being connected to can be in any domain of the forest as long as
-they are also Global Catalog servers.
-<3> A load balancing setting is provided to indicate the desired behavior when choosing
-the server to connect to.
-
-In this configuration, users will need to use either their full User Principal
-Name (UPN) or their Down-Level Logon Name. A UPN is typically a concatenation of
-the username with `@<DNS domain name>`.
-
-[[ad-settings]]
-===== Active Directory Realm Settings
-
-[cols="4,^3,10"]
-|=======================
-| Setting                    | Required | Description
-| `url`                      | no       | An `ldap[s]://<server>:<port>` URL.
-                                          {security} attempts to authenticate against this URL. If the
-                                          URL is not specified, it is derived from the `domain_name`,
-                                          assuming an unencrypted connection to port 389. For example,
-                                          `ldap://<domain_name>:389`. This setting is required when
-                                          connecting using SSL/TLS or via a custom port.
-| `bind_dn`                  | no       | The DN of the user that is used to bind to Active Directory
-                                          and perform searches. Due to its potential security
-                                          impact, `bind_dn` is not exposed via the
-                                          {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API].
-| `bind_password`            | no       | The password for the user that is used to bind to
-                                          Active Directory. Due to its potential security impact,
-                                          `bind_password` is not exposed via the
-                                          {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API].
-                                          *Deprecated.* Use `secure_bind_password` instead. 
-| `secure_bind_password`     | no       | ({ref}/secure-settings.html[Secure])
-                                          The password for the user that is used to bind to Active Directory.
-| `load_balance.type`        | no       | The behavior to use when there are multiple LDAP URLs defined.
-                                          For supported values see <>.
-| `load_balance.cache_ttl`   | no       | When using `dns_failover` or `dns_round_robin` as the load
-                                          balancing type, this setting controls the amount of time to
-                                          cache DNS lookups. Defaults to `1h`.
-| `user_search.base_dn`      | no       | Specifies the context to search for the user. Defaults to the
-                                          root of the Active Directory domain.
-| `user_search.scope`        | no       | Specifies whether the user search should be `sub_tree` (default),
-                                          `one_level`, or `base`. `sub_tree` searches all objects contained
-                                          under `base_dn`. `one_level` only searches users directly
-                                          contained within the `base_dn`. `base` specifies that the
-                                          `base_dn` is a user object and that it is the only user considered.
-| `user_search.filter`       | no       | Specifies a filter to use to lookup a user given a username.
-                                          The default filter looks up `user` objects with either
-                                          `sAMAccountName` or `userPrincipalName`. If specified, this
-                                          must be a valid LDAP user search filter, for example
-                                          `(&(objectClass=user)(sAMAccountName={0}))`. For more
-                                          information, see https://msdn.microsoft.com/en-us/library/aa746475(v=vs.85).aspx[Search Filter Syntax].
-| `user_search.upn_filter`   | no       | Specifies a filter to use to lookup a user given a user principal name.
-                                          The default filter looks up `user` objects with
-                                          a matching `userPrincipalName`. If specified, this
-                                          must be a valid LDAP user search filter, for example
-                                          `(&(objectClass=user)(userPrincipalName={1}))`. `{1}` is
-                                          the full user principal name provided by the user. For more
-                                          information, see https://msdn.microsoft.com/en-us/library/aa746475(v=vs.85).aspx[Search Filter Syntax].
-| `user_search.down_level_filter` | no  | Specifies a filter to use to lookup a user given a down level logon name (DOMAIN\user).
-                                          The default filter looks up `user` objects with a matching
-                                          `sAMAccountName` in the domain provided. If specified, this
-                                          must be a valid LDAP user search filter, for example
-                                          `(&(objectClass=user)(sAMAccountName={0}))`. For more
-                                          information, see https://msdn.microsoft.com/en-us/library/aa746475(v=vs.85).aspx[Search Filter Syntax].
-| `user_search.pool.enabled`      | no  | Enables or disables connection pooling for user search. When
-                                          disabled a new connection is created for every search. The
-                                          default is `true` when `bind_dn` is provided.
-| `user_search.pool.size`         | no  | Specifies the maximum number of connections to Active Directory
-                                          server to allow in the connection pool. Defaults to `20`.
-| `user_search.pool.initial_size` | no  | The initial number of connections to create to Active Directory
-                                          server on startup. Defaults to `0`. Values greater than `0`
-                                          could cause startup failures if the LDAP server is down.
-| `user_search.pool.health_check.enabled` | no | Enables or disables a health check on Active Directory connections in
-                                                 the connection pool. Connections are checked in the
-                                                 background at the specified interval. Defaults to `true`.
-| `user_search.pool.health_check.dn`      | no | Specifies the distinguished name to retrieve as part of
-                                                 the health check. Defaults to the value of `bind_dn` if present, and if
-                                                 not falls back to `user_search.base_dn`.
-| `user_search.pool.health_check.interval` | no | How often to perform background checks of connections in
-                                                  the pool. Defaults to `60s`.
-| `group_search.base_dn`     | no       | Specifies the context to search for groups in which the user
-                                          has membership. Defaults to the root of the Active Directory
-                                          domain.
-| `group_search.scope`       | no       | Specifies whether the group search should be `sub_tree` (default),
-                                          `one_level` or `base`.  `sub_tree` searches all objects contained
-                                          under `base_dn`. `one_level` searches for groups directly
-                                          contained within the `base_dn`. `base` specifies that the
-                                          `base_dn` is a group object and that it is the only group considered.
-| `unmapped_groups_as_roles` | no       | Specifies whether the names of any unmapped Active Directory
-                                          groups should be used as role names and assigned to the user.
-                                          A group is considered to be _unmapped_ if it is not referenced
-                                          in any <> (API based
-                                          role-mappings are not considered).
-                                          Defaults to `false`.
-| `files.role_mapping`       | no       | Specifies the path and file name of the
-                                          <>.
-                                          Defaults to `ES_PATH_CONF/x-pack/role_mapping.yml`,
-                                          where `ES_PATH_CONF` is `ES_HOME/config` (zip/tar installations)
-                                          or `/etc/elasticsearch` (package installations).
-| `follow_referrals`         | no       | Specifies whether {security} should follow referrals returned
-                                          by the Active Directory server. Referrals are URLs returned by
-                                          the server that are to be used to continue the LDAP operation
-                                          (such as `search`). Defaults to `true`.
-| `metadata`                 | no       | Specifies the list of additional LDAP attributes that should
-                                          be stored in the `metadata` of an authenticated user.
-| `ssl.key`                  | no       | Specifies the path to the PEM encoded private key to use if the Active Directory
-                                          server requires client authentication. `ssl.key` and `ssl.keystore.path` may not be used at the
-                                          same time.
-| `ssl.key_passphrase`       | no       | Specifies the passphrase to decrypt the PEM encoded private key if it is encrypted.
-| `ssl.certificate`          | no       | Specifies the path to the PEM encoded certificate (or certificate chain) that goes with the key
-                                          if the Active Directory server requires client authentication.
-| `ssl.certificate_authorities`| no     | Specifies the paths to the PEM encoded certificate authority certificates that
-                                          should be trusted. `ssl.certificate_authorities` and `ssl.truststore.path` may not be used at
-                                          the same time.
-| `ssl.keystore.path`        | no       | The path to the Java Keystore file that contains a private key and certificate. `ssl.key` and
-                                          `ssl.keystore.path` may not be used at the same time.
-| `ssl.keystore.password`    | no       | The password to the keystore.
-| `ssl.keystore.key_password`| no       | The password for the key in the keystore. Defaults to the keystore password.
-| `ssl.truststore.path`      | no       | The path to the Java Keystore file that contains the certificates to trust.
-                                          `ssl.certificate_authorities` and `ssl.truststore.path` may not be used at the same time.
-| `ssl.truststore.password`  | no       | The password to the truststore.
-| `ssl.verification_mode`    | no       | Specifies the type of verification to be performed when
-                                          connecting to an Active Directory server using `ldaps`. When
-                                          set to `full`, the hostname or IP address used in the `url`
-                                          must match one of the names in the certificate or the
-                                          connection will not be allowed. Due to their potential security impact,
-                                          `ssl` settings are not exposed via the
-                                          {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API].
-+
-                                          Values are `none`, `certificate`, and `full`. Defaults to `full`.
-+
-                                          See {ref}/security-settings.html#ssl-tls-settings[`xpack.ssl.verification_mode`]
-                                          for an explanation of these values.
-| `ssl.supported_protocols`  | no       | Specifies the supported protocols for TLS/SSL.
-| `ssl.cipher_suites`        | no       | Specifies the cipher suites that should be supported when communicating
-                                          with the Active Directory server.
-| `cache.ttl`                | no       | Specifies the time-to-live for cached user entries. A user's
-                                          credentials are cached for this period of time. Specify the
-                                          time period using the standard Elasticsearch
-                                          {ref}/common-options.html#time-units[time units].
-                                          Defaults to `20m`.
-| `cache.max_users`          | no       | Specifies the maximum number of user entries that can be
-                                          stored in the cache at one time. Defaults to 100,000.
-| `cache.hash_algo`          | no       | Specifies the hashing algorithm that is used for the
-                                          cached user credentials.
-                                          See <> for the
-                                          possible values. (Expert Setting).
-|=======================
+See {ref}/security-settings.html#ref-ad-settings[Active Directory Realm Settings].
 
 [[mapping-roles-ad]]
 ==== Mapping Active Directory Users and Groups to Roles
 
-An integral part of a realm authentication process is to resolve the roles
-associated with the authenticated user. Roles define the privileges a user has
-in the cluster.
-
-Since with the `active_directory` realm the users are managed externally in the
-Active Directory server, the expectation is that their roles are managed there
-as well. In fact, Active Directory supports the notion of groups, which often
-represent user roles for different systems in the organization.
-
-The `active_directory` realm enables you to map Active Directory users to roles
-via their Active Directory groups, or other metadata. This role mapping can be
-configured via the {ref}/security-api-role-mapping.html[role-mapping API], or by using
-a file stored on each node. When a user authenticates against an Active
-Directory realm, the privileges for that user are the union of all privileges
-defined by the roles to which the user is mapped.
-
-Within a mapping definition, you specify groups using their distinguished
-names. For example, the following mapping configuration maps the Active
-Directory `admins` group to both the `monitoring` and `user` roles, maps the
-`users` group to the `user` role and maps the `John Doe` user to the `user`
-role.
-
-Configured via the role-mapping API:
-[source,js]
---------------------------------------------------
-PUT _xpack/security/role_mapping/admins
-{
-  "roles" : [ "monitoring" , "user" ],
-  "rules" : { "field" : {
-    "groups" : "cn=admins,dc=example,dc=com" <1>
-  } },
-  "enabled": true
-}
---------------------------------------------------
-// CONSOLE
-<1> The Active Directory distinguished name (DN) of the `admins` group.
-
-[source,js]
---------------------------------------------------
-PUT _xpack/security/role_mapping/basic_users
-{
-  "roles" : [ "user" ],
-  "rules" : { "any": [
-    { "field" : {
-      "groups" : "cn=users,dc=example,dc=com" <1>
-    } },
-    { "field" : {
-      "dn" : "cn=John Doe,cn=contractors,dc=example,dc=com" <2>
-    } }
-  ] },
-  "enabled": true
-}
---------------------------------------------------
-// CONSOLE
-<1> The Active Directory distinguished name (DN) of the `users` group.
-<2> The Active Directory distinguished name (DN) of the user `John Doe`.
-
-Or, alternatively, configured via the role-mapping file:
-[source, yaml]
-------------------------------------------------------------
-monitoring: <1>
-  - "cn=admins,dc=example,dc=com" <2>
-user:
-  - "cn=users,dc=example,dc=com" <3>
-  - "cn=admins,dc=example,dc=com"
-  - "cn=John Doe,cn=contractors,dc=example,dc=com" <4>
-------------------------------------------------------------
-<1> The name of the role.
-<2> The Active Directory distinguished name (DN) of the `admins` group.
-<3> The Active Directory distinguished name (DN) of the `users` group.
-<4> The Active Directory distinguished name (DN) of the user `John Doe`.
-
-For more information, see <>.
+See {ref}/configuring-ad-realm.html[Configuring an Active Directory realm]. 
 
 [[ad-user-metadata]]
 ==== User Metadata in Active Directory Realms
+
 When a user is authenticated via an Active Directory realm, the following
-properties are populated in the user's _metadata_. This metadata is returned in the
-{ref}/security-api-authenticate.html[authenticate API], and can be used with
-<> in roles.
+properties are populated in the user's _metadata_:
 
 |=======================
 | Field               | Description
@@ -442,51 +64,15 @@ properties are populated in the user's _metadata_. This metadata is returned in
                         groups were mapped to a role).
 |=======================
 
+This metadata is returned in the 
+{ref}/security-api-authenticate.html[authenticate API] and can be used with
+<<templating-role-query, templated role queries>> in roles.
+
 Additional metadata can be extracted from the Active Directory server by configuring
 the `metadata` setting on the Active Directory realm.
 
 [[active-directory-ssl]]
 ==== Setting up SSL Between Elasticsearch and Active Directory
 
-To protect the user credentials that are sent for authentication, it's highly
-recommended to encrypt communications between Elasticsearch and your Active
-Directory server. Connecting via SSL/TLS ensures that the identity of the Active
-Directory server is authenticated before {security} transmits the user
-credentials, and the usernames and passwords are encrypted in transit.
-
-To encrypt communications between Elasticsearch and Active Directory:
-
-. Configure each node to trust certificates signed by the CA that signed your
-Active Directory server certificates. The following example demonstrates how to trust a CA certificate,
-`cacert.pem`, located within the {xpack} configuration directory:
-+
-[source,shell]
---------------------------------------------------
-xpack:
-  security:
-    authc:
-      realms:
-        active_directory:
-          type: active_directory
-          order: 0
-          domain_name: ad.example.com
-          url: ldaps://ad.example.com:636
-          ssl:
-            certificate_authorities: [ "CONFIG_DIR/x-pack/cacert.pem" ]
---------------------------------------------------
-+
-The CA cert must be a PEM encoded certificate.
-
-. Set the `url` attribute in the realm configuration to specify the LDAPS protocol
-and the secure port number. For example, `url: ldaps://ad.example.com:636`.
-
-. Restart Elasticsearch.
-
-NOTE: By default, when you configure {security} to connect to Active Directory
-      using SSL/TLS, {security} attempts to verify the hostname or IP address
-      specified with the `url` attribute in the realm configuration with the
-      values in the certificate. If the values in the certificate and realm
-      configuration do not match, {security} does not allow a connection to the
-      Active Directory server. This is done to protect against man-in-the-middle
-      attacks. If necessary, you can disable this behavior by setting the
-      {ref}/security-settings.html#ssl-tls-settings[`ssl.verification_mode`] property to `certificate`.
+See 
+{ref}/configuring-tls.html#tls-active-directory[Encrypting communications between {es} and Active Directory].
\ No newline at end of file
diff --git a/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc
new file mode 100644
index 0000000000000..6298bb8ef9f54
--- /dev/null
+++ b/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc
@@ -0,0 +1,248 @@
+[role="xpack"]
+[[configuring-ad-realm]]
+=== Configuring an Active Directory realm
+
+You can configure {security} to communicate with Active Directory to authenticate
+users. To integrate with Active Directory, you configure an `active_directory`
+realm and map Active Directory users and groups to {security} roles in the role 
+mapping file.
+
+For more information about Active Directory realms, see 
+{xpack-ref}/active-directory-realm.html[Active Directory User Authentication].
+
+. Add a realm configuration of type `active_directory` to `elasticsearch.yml`
+under the `xpack.security.authc.realms` namespace. At a minimum, you must set 
+the realm `type` to `active_directory` and specify the Active Directory 
+`domain_name`. If you are configuring multiple realms, you should also 
+explicitly set the `order` attribute to control the order in which the realms 
+are consulted during authentication. 
++
+--
+See {ref}/security-settings.html#ref-ad-settings[Active Directory realm settings] for all of the options you can set for an 
+`active_directory` realm.
+
+NOTE: Binding to Active Directory fails if the domain name is not mapped in DNS.
+      If DNS is not being provided by a Windows DNS server, add a mapping for
+      the domain in the local `/etc/hosts` file.
+
+For example, the following realm configuration configures {security} to connect
+to `ldaps://ad.example.com:636` to authenticate users through Active Directory:
+
+[source, yaml]
+------------------------------------------------------------
+xpack:
+  security:
+    authc:
+      realms:
+        active_directory:
+          type: active_directory
+          order: 0 <1>
+          domain_name: ad.example.com
+          url: ldaps://ad.example.com:636 <2>
+------------------------------------------------------------
+<1> The realm order controls the order in which the configured realms are checked
+    when authenticating a user.
+<2> If you don't specify the URL, it defaults to `ldap://<domain_name>:389`.
+
+IMPORTANT: When you configure realms in `elasticsearch.yml`, only the
+realms you specify are used for authentication. If you also want to use the
+`native` or `file` realms, you must include them in the realm chain.
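+
+For example, a minimal sketch of a realm chain that checks the `file` and
+`native` realms before Active Directory (realm names and orders here are
+illustrative):
+
+[source, yaml]
+------------------------------------------------------------
+xpack:
+  security:
+    authc:
+      realms:
+        file1:
+          type: file
+          order: 0
+        native1:
+          type: native
+          order: 1
+        active_directory:
+          type: active_directory
+          order: 2
+          domain_name: ad.example.com
+------------------------------------------------------------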
+--
+
+. If you are authenticating users across multiple domains in a forest, extra 
+steps are required. There are a few minor differences in the configuration and 
+the way that users authenticate. 
++
+--
+Set the `domain_name` setting to the forest root domain name. 
+
+You must also set the `url` setting, since you must authenticate against the 
+Global Catalog, which uses a different port and might not be running on every 
+Domain Controller.
+
+For example, the following realm configuration configures {security} to connect 
+to specific Domain Controllers on the Global Catalog port with the domain name 
+set to the forest root:
+
+[source, yaml]
+------------------------------------------------------------
+xpack:
+  security:
+    authc:
+      realms:
+        active_directory:
+          type: active_directory
+          order: 0
+          domain_name: example.com <1>
+          url: ldaps://dc1.ad.example.com:3269, ldaps://dc2.ad.example.com:3269 <2>
+          load_balance:
+            type: "round_robin" <3>
+------------------------------------------------------------
+<1> The `domain_name` is set to the name of the root domain in the forest.
+<2> The `url` value used in this example has URLs for two different Domain Controllers,
+which are also Global Catalog servers. Port 3268 is the default port for unencrypted
+communication with the Global Catalog; port 3269 is the default port for SSL connections.
+The servers that are being connected to can be in any domain of the forest as long as
+they are also Global Catalog servers.
+<3> A load balancing setting is provided to indicate the desired behavior when choosing
+the server to connect to.
+
+In this configuration, users will need to use either their full User Principal
+Name (UPN) or their Down-Level Logon Name. A UPN is typically a concatenation of
+the username with `@<DNS domain name>`.
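+
+For example, a user `jdoe` in the `ad.example.com` forest might authenticate
+with the UPN `jdoe@ad.example.com`; the name here is illustrative, and the
+exact Down-Level Logon Name depends on the NetBIOS domain name.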
+--
+
+. (Optional) To protect passwords, 
+{ref}/configuring-tls.html#tls-active-directory[encrypt communications between {es} and Active Directory]. 
+
+. Restart {es}.
+
+. Configure a bind user. 
++
+--
+The Active Directory realm authenticates users using an LDAP bind request. By 
+default, all of the LDAP operations are run by the user that {security} is
+authenticating. In some cases, regular users may not be able to access all of the
+necessary items within Active Directory and a _bind user_ is needed. A bind user
+can be configured and is used to perform all operations other than the LDAP bind 
+request, which is required to authenticate the credentials provided by the user.
+
+The use of a bind user enables the 
+{xpack-ref}/run-as-privilege.html[run as feature] to be used with the Active 
+Directory realm and the ability to maintain a set of pooled connections to 
+Active Directory. These pooled connections reduce the number of resources that 
+must be created and destroyed with every user authentication.
+
+The following example shows the configuration of a bind user through the use of 
+the `bind_dn` and `secure_bind_password` settings:
+
+[source, yaml]
+------------------------------------------------------------
+xpack:
+  security:
+    authc:
+      realms:
+        active_directory:
+          type: active_directory
+          order: 0
+          domain_name: ad.example.com
+          url: ldaps://ad.example.com:636
+          bind_dn: es_svc_user@ad.example.com <1>
+------------------------------------------------------------
+<1> This is the user that all Active Directory search requests are executed as.
+    Without a bind user configured, all requests run as the user that is authenticating
+    with {es}.
+
+The password for the `bind_dn` user should be configured by adding the 
+appropriate `secure_bind_password` setting to the {es} keystore. For example, 
+the following command adds the password for the example realm above:
+
+[source, shell]
+------------------------------------------------------------
+bin/elasticsearch-keystore add \
+xpack.security.authc.realms.active_directory.secure_bind_password
+------------------------------------------------------------
+
+When a bind user is configured, connection pooling is enabled by default.
+Connection pooling can be disabled using the `user_search.pool.enabled` setting.
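+
+For example, a sketch of a realm that keeps the bind user but disables
+connection pooling:
+
+[source, yaml]
+------------------------------------------------------------
+xpack:
+  security:
+    authc:
+      realms:
+        active_directory:
+          type: active_directory
+          order: 0
+          domain_name: ad.example.com
+          bind_dn: es_svc_user@ad.example.com
+          user_search:
+            pool:
+              enabled: false
+------------------------------------------------------------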
+--
+
+. Map Active Directory users and groups to roles. 
++
+--
+An integral part of a realm authentication process is to resolve the roles
+associated with the authenticated user. Roles define the privileges a user has
+in the cluster.
+
+Since with the `active_directory` realm the users are managed externally in the
+Active Directory server, the expectation is that their roles are managed there
+as well. In fact, Active Directory supports the notion of groups, which often
+represent user roles for different systems in the organization.
+
+The `active_directory` realm enables you to map Active Directory users to roles
+via their Active Directory groups or other metadata. This role mapping can be
+configured via the {ref}/security-api-role-mapping.html[role-mapping API] or by using
+a file stored on each node. When a user authenticates against an Active
+Directory realm, the privileges for that user are the union of all privileges
+defined by the roles to which the user is mapped.
+
+Within a mapping definition, you specify groups using their distinguished
+names. For example, the following mapping configuration maps the Active
+Directory `admins` group to both the `monitoring` and `user` roles, maps the
+`users` group to the `user` role and maps the `John Doe` user to the `user`
+role.
+
+Configured via the role-mapping API:
+[source,js]
+--------------------------------------------------
+PUT _xpack/security/role_mapping/admins
+{
+  "roles" : [ "monitoring" , "user" ],
+  "rules" : { "field" : {
+    "groups" : "cn=admins,dc=example,dc=com" <1>
+  } },
+  "enabled": true
+}
+--------------------------------------------------
+// CONSOLE
+<1> The Active Directory distinguished name (DN) of the `admins` group.
+
+[source,js]
+--------------------------------------------------
+PUT _xpack/security/role_mapping/basic_users
+{
+  "roles" : [ "user" ],
+  "rules" : { "any": [
+    { "field" : {
+      "groups" : "cn=users,dc=example,dc=com" <1>
+    } },
+    { "field" : {
+      "dn" : "cn=John Doe,cn=contractors,dc=example,dc=com" <2>
+    } }
+  ] },
+  "enabled": true
+}
+--------------------------------------------------
+// CONSOLE
+<1> The Active Directory distinguished name (DN) of the `users` group.
+<2> The Active Directory distinguished name (DN) of the user `John Doe`.
+
+Or, alternatively, configured via the role-mapping file:
+[source, yaml]
+------------------------------------------------------------
+monitoring: <1>
+  - "cn=admins,dc=example,dc=com" <2>
+user:
+  - "cn=users,dc=example,dc=com" <3>
+  - "cn=admins,dc=example,dc=com"
+  - "cn=John Doe,cn=contractors,dc=example,dc=com" <4>
+------------------------------------------------------------
+<1> The name of the role.
+<2> The Active Directory distinguished name (DN) of the `admins` group.
+<3> The Active Directory distinguished name (DN) of the `users` group.
+<4> The Active Directory distinguished name (DN) of the user `John Doe`.
+
+For more information, see 
+{xpack-ref}/mapping-roles.html[Mapping users and groups to roles].
+--
+
+. (Optional) Configure the `metadata` setting in the Active Directory realm to 
+include extra properties in the user's metadata. 
++
+--
+By default, `ldap_dn` and `ldap_groups` are populated in the user's metadata. 
+For more information, see 
+{xpack-ref}/active-directory-realm.html#ad-user-metadata[User Metadata in Active Directory Realms]. 
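+
+For example, the following sketch stores two additional attributes in the
+user's metadata; the attribute names are illustrative and must exist on your
+user entries:
+
+[source, yaml]
+------------------------------------------------------------
+xpack:
+  security:
+    authc:
+      realms:
+        active_directory:
+          type: active_directory
+          order: 0
+          domain_name: ad.example.com
+          metadata: [ "mail", "department" ]
+------------------------------------------------------------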
+--
diff --git a/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc
new file mode 100644
index 0000000000000..8555902e503d3
--- /dev/null
+++ b/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc
@@ -0,0 +1,106 @@
+[role="xpack"]
+[[configuring-file-realm]]
+=== Configuring a file realm
+
+You can manage and authenticate users with the built-in `file` internal realm. 
+All the data about the users for the `file` realm is stored in two files on each 
+node in the cluster: `users` and `users_roles`. Both files are located in 
+`CONFIG_DIR/` and are read on startup.
+
+[IMPORTANT]
+==============================
+The `users` and `users_roles` files are managed locally by the node and are 
+**not** managed globally by the cluster. This means that with a typical 
+multi-node cluster, the exact same changes need to be applied on each and every 
+node in the cluster.
+
+A safer approach would be to apply the change on one of the nodes and have the 
+files distributed or copied to all other nodes in the cluster (either manually 
+or using a configuration management system such as Puppet or Chef).
+==============================
+
+The `file` realm is added to the realm chain by default. You don't need to
+explicitly configure a `file` realm.
+
+For more information about file realms, see 
+{xpack-ref}/file-realm.html[File-based user authentication].
+
+. (Optional) Add a realm configuration of type `file` to `elasticsearch.yml` 
+under the `xpack.security.authc.realms` namespace. At a minimum, you must set 
+the realm `type` to `file`. If you are configuring multiple realms, you should 
+also explicitly set the `order` attribute. 
++
+--
+//See <> for all of the options you can set for a `file` realm.
+
+For example, the following snippet shows a `file` realm configuration that sets
+the `order` to zero so the realm is checked first:
+
+[source, yaml]
+------------------------------------------------------------
+xpack:
+  security:
+    authc:
+      realms:
+        file1:
+          type: file
+          order: 0
+------------------------------------------------------------
+--
+
+. Restart {es}.
+
+. Add user information to the `CONFIG_DIR/users` file on each node in the 
+cluster. 
++
+--
+The `users` file stores all the users and their passwords. Each line in the file 
+represents a single user entry consisting of the username and **hashed** password.
+
+[source,bash]
+----------------------------------------------------------------------
+rdeniro:$2a$10$BBJ/ILiyJ1eBTYoRKxkqbuDEdYECplvxnqQ47uiowE7yGqvCEgj9W
+alpacino:$2a$10$cNwHnElYiMYZ/T3K4PvzGeJ1KbpXZp2PfoQD.gfaVdImnHOwIuBKS
+jacknich:$2a$10$GYUNWyABV/Ols/.bcwxuBuuaQzV6WIauW6RdboojxcixBq3LtI3ni
+----------------------------------------------------------------------
+
+{security} uses `bcrypt` to hash the user passwords.
+
+While it is possible to modify these files directly using any standard text
+editor, we strongly recommend using the
+{ref}/users-command.html[`elasticsearch-users`] tool to apply the required changes.
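+
+For example, a user can be added with the `elasticsearch-users` tool; the
+username, password, and role names here are illustrative:
+
+[source,shell]
+----------------------------------------------------------------------
+bin/elasticsearch-users useradd jacknich -p theshining -r network,monitoring
+----------------------------------------------------------------------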
+
+IMPORTANT:  As the administrator of the cluster, it is your responsibility to
+            ensure the same users are defined on every node in the cluster.
+            {security} does not deliver any mechanism to guarantee this.
+            
+--
+
+. Add role information to the `CONFIG_DIR/users_roles` file on each node 
+in the cluster. 
++
+--
+The `users_roles` file stores the roles associated with the users. For example:
+
+[source,shell]
+--------------------------------------------------
+admin:rdeniro
+power_user:alpacino,jacknich
+user:jacknich
+--------------------------------------------------
+
+Each row maps a role to a comma-separated list of all the users that are
+associated with that role.
+
+You can use the {ref}/users-command.html[`elasticsearch-users`] tool to update this file. You must ensure that 
+the same changes are made on every node in the cluster. 
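+
+For example, a role can be added to an existing user with the
+`elasticsearch-users` tool; the role name here is illustrative:
+
+[source,shell]
+--------------------------------------------------
+bin/elasticsearch-users roles jacknich -a logstash_admin
+--------------------------------------------------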
+--
+
+. (Optional) Change how often the `users` and `users_roles` files are checked. 
++
+--
+By default, {security} checks these files for changes every 5 seconds. You can
+change this default behavior by changing the `resource.reload.interval.high` 
+setting in the `elasticsearch.yml` file (as this is a common setting in {es},
+changing its value may affect other schedules in the system).
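+
+For example, a hypothetical `elasticsearch.yml` entry that relaxes the check
+interval to 30 seconds:
+
+[source, yaml]
+------------------------------------------------------------
+resource.reload.interval.high: 30s
+------------------------------------------------------------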
+--
\ No newline at end of file
diff --git a/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc
new file mode 100644
index 0000000000000..f66a82b06641e
--- /dev/null
+++ b/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc
@@ -0,0 +1,176 @@
+[role="xpack"]
+[[configuring-pki-realm]]
+=== Configuring a PKI realm
+
+You can configure {security} to use Public Key Infrastructure (PKI) certificates
+to authenticate users in {es}. This requires clients to present X.509
+certificates.
+
+NOTE: You cannot use PKI certificates to authenticate users in {kib}.
+
+To use PKI in {es}, you configure a PKI realm, enable client authentication on
+the desired network layers (transport or http), and map the Distinguished Names
+(DNs) from the user certificates to {security} roles in the role mapping file.
+
+You can also use a combination of PKI and username/password authentication. For
+example, you can enable SSL/TLS on the transport layer and define a PKI realm to
+require transport clients to authenticate with X.509 certificates, while still
+authenticating HTTP traffic using username and password credentials. You can 
+also set `xpack.security.transport.ssl.client_authentication` to `optional` to 
+allow clients without certificates to authenticate with other credentials.
+
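+For example, a minimal sketch that requires certificates on the transport
+layer while leaving HTTP traffic to username/password authentication (TLS keys
+and certificates must also be configured):
+
+[source, yaml]
+------------------------------------------------------------
+xpack.security.transport.ssl.enabled: true
+xpack.security.transport.ssl.client_authentication: required
+xpack.security.http.ssl.enabled: true
+------------------------------------------------------------
+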
+IMPORTANT:  You must enable SSL/TLS and enable client authentication to use PKI.
+
+For more information, see {xpack-ref}/pki-realm.html[PKI User Authentication].
+
+. Add a realm configuration of type `pki` to `elasticsearch.yml` under the
+`xpack.security.authc.realms` namespace. At a minimum, you must set the realm 
+`type` to `pki`. If you are configuring multiple realms, you should also 
+explicitly set the `order` attribute. See {ref}/security-settings.html#ref-pki-settings[PKI realm settings] for all of the 
+options you can set for a `pki` realm.
++
+--
+For example, the following snippet shows the most basic `pki` realm configuration:
+
+[source, yaml]
+------------------------------------------------------------
+xpack:
+  security:
+    authc:
+      realms:
+        pki1:
+          type: pki
+------------------------------------------------------------
+
+With this configuration, any certificate trusted by the SSL/TLS layer is accepted
+for authentication. The username is the common name (CN) extracted from the DN
+of the certificate.
+
+IMPORTANT: When you configure realms in `elasticsearch.yml`, only the
+realms you specify are used for authentication. If you also want to use the
+`native` or `file` realms, you must include them in the realm chain.
+
+If you want to use something other than the CN of the DN as the username, you
+can specify a regex to extract the desired username. For example, the regex in
+the following configuration extracts the email address from the DN:
+
+[source, yaml]
+------------------------------------------------------------
+xpack:
+  security:
+    authc:
+      realms:
+        pki1:
+          type: pki
+          username_pattern: "EMAILADDRESS=(.*?)(?:,|$)"
+------------------------------------------------------------
+--
+
+. Restart {es}.
+
+. {ref}/configuring-tls.html[Enable SSL/TLS]. 
+
+. Enable client authentication on the desired network layers (transport or http).
++
+--
+//TBD: This step might need to be split into a separate topic with additional details
+//about setting up client authentication. 
+The PKI realm relies on the TLS settings of the node's network interface. The 
+realm can be configured to be more restrictive than the underlying network 
+connection - that is, it is possible to configure the node such that some 
+connections are accepted by the network interface but then fail to be 
+authenticated by the PKI realm. However, the reverse is not possible. The PKI 
+realm cannot authenticate a connection that has been refused by the network 
+interface.
+
+In particular this means:
+
+* The transport or http interface must request client certificates by setting
+  `client_authentication` to `optional` or `required`.
+* The interface must _trust_ the certificate that is presented by the client
+  by configuring either the `truststore` or `certificate_authorities` paths,
+  or by setting `verification_mode` to `none`. See 
+  {ref}/security-settings.html#ssl-tls-settings[`xpack.ssl.verification_mode`] for an explanation of this 
+  setting.
+* The _protocols_ supported by the interface must be compatible with those
+  used by the client.
+
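+For example, the requirements above might be satisfied on the HTTP layer with
+settings along these lines (a sketch; the CA path is a placeholder):
+
+[source, yaml]
+------------------------------------------------------------
+xpack.security.http.ssl.client_authentication: optional
+xpack.security.http.ssl.certificate_authorities: [ "/path/to/ca.crt" ]
+------------------------------------------------------------
+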
+The relevant network interface (transport or http) must be configured to trust
+any certificate that is to be used within the PKI realm. However, it is possible to
+configure the PKI realm to trust only a _subset_ of the certificates accepted
+by the network interface. This is useful when the SSL/TLS layer trusts clients 
+with certificates that are signed by a different CA than the one that signs your 
+users' certificates.
+
+To configure the PKI realm with its own truststore, specify the `truststore.path` 
+option. For example:
+
+[source, yaml]
+------------------------------------------------------------
+xpack:
+  security:
+    authc:
+      realms:
+        pki1:
+          type: pki
+          truststore:
+            path: "/path/to/pki_truststore.jks"
+            password: "x-pack-test-password"
+------------------------------------------------------------
+
+The `certificate_authorities` option can be used as an alternative to the
+`truststore.path` setting.
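+
+For example, a sketch of the same realm trusting a PEM-encoded CA instead of a
+JKS truststore (the path is a placeholder):
+
+[source, yaml]
+------------------------------------------------------------
+xpack:
+  security:
+    authc:
+      realms:
+        pki1:
+          type: pki
+          certificate_authorities: [ "/path/to/users_ca.crt" ]
+------------------------------------------------------------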
+--
+
+. Map roles for PKI users.
++
+--
+You map roles for PKI users through the 
+{ref}/security-api-role-mapping.html[role-mapping API] or by using a file stored on
+each node. When a user authenticates against a PKI realm, the privileges for
+that user are the union of all privileges defined by the roles to which the
+user is mapped.
+
+You identify a user by the distinguished name in their certificate.
+For example, the following mapping configuration maps `John Doe` to the
+`user` role:
+
+Using the role-mapping API:
+[source,js]
+--------------------------------------------------
+PUT _xpack/security/role_mapping/users
+{
+  "roles" : [ "user" ],
+  "rules" : { "field" : {
+    "dn" : "cn=John Doe,ou=example,o=com" <1>
+  } },
+  "enabled": true
+}
+--------------------------------------------------
+// CONSOLE
+<1> The distinguished name (DN) of a PKI user.
+
+Or, alternatively, configured in a role-mapping file:
+[source, yaml]
+------------------------------------------------------------
+user: <1>
+  - "cn=John Doe,ou=example,o=com" <2>
+------------------------------------------------------------
+<1> The name of a role.
+<2> The distinguished name (DN) of a PKI user.
+
+The distinguished name for a PKI user follows X.500 naming conventions, which
+place the most specific fields (like `cn` or `uid`) at the beginning of the
+name, and the most general fields (like `o` or `dc`) at the end of the name.
+Some tools, such as _openssl_, may print out the subject name in a different
+format.
+
+One way that you can determine the correct DN for a certificate is to use the
+{ref}/security-api-authenticate.html[authenticate API] (use the relevant PKI
+certificate as the means of authentication) and inspect the metadata field in
+the result. The user's distinguished name will be populated under the `pki_dn`
+key. You can also use the authenticate API to validate your role mapping.
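+
+For example, a sketch of checking the resolved user with the authenticate API:
+
+[source,js]
+--------------------------------------------------
+GET _xpack/security/_authenticate
+--------------------------------------------------
+// CONSOLE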
+
+For more information, see 
+{xpack-ref}/mapping-roles.html[Mapping Users and Groups to Roles].
+--
\ No newline at end of file
diff --git a/x-pack/docs/en/security/authentication/file-realm.asciidoc b/x-pack/docs/en/security/authentication/file-realm.asciidoc
index 507baaf1f1f28..937537ac1a11f 100644
--- a/x-pack/docs/en/security/authentication/file-realm.asciidoc
+++ b/x-pack/docs/en/security/authentication/file-realm.asciidoc
@@ -1,8 +1,8 @@
 [[file-realm]]
 === File-based User Authentication
 
-You can manage and authenticate users with the built-in `file` internal realm.
-With the `file` realm users are defined in local files on each node in the cluster.
+You can manage and authenticate users with the built-in `file` realm.
+With the `file` realm, users are defined in local files on each node in the cluster.
 
 IMPORTANT:  As the administrator of the cluster, it is your responsibility to
             ensure the same users are defined on every node in the cluster.
@@ -20,127 +20,7 @@ realms you specify are used for authentication. To use the
 
 To define users, {security} provides the {ref}/users-command.html[users]
 command-line tool. This tool enables you to add and remove users, assign user
-roles and manage user passwords.
+roles, and manage user passwords.
 
-==== Configuring a File Realm
-
-The `file` realm is added to the realm chain by default. You don't need to
-explicitly configure a `file` realm to manage users with the `users` tool.
-
-Like other realms, you can configure options for a `file` realm in the
-`xpack.security.authc.realms` namespace in `elasticsearch.yml`.
-
-To configure an `file` realm:
-
-. Add a realm configuration of type `file` to `elasticsearch.yml` under the
-`xpack.security.authc.realms` namespace. At a minimum, you must set the realm `type` to
-`file`. If you are configuring multiple realms, you should also explicitly set
-the `order` attribute. See <> for all of the options you can set
-for a `file` realm.
-+
-For example, the following snippet shows a `file` realm configuration that sets
-the `order` to zero so the realm is checked first:
-+
-[source, yaml]
-------------------------------------------------------------
-xpack:
-  security:
-    authc:
-      realms:
-        file1:
-          type: file
-          order: 0
-------------------------------------------------------------
-
-. Restart Elasticsearch.
-
-[[file-realm-settings]]
-===== File Realm Settings
-
-[cols="4,^3,10"]
-|=======================
-| Setting                 | Required  | Description
-| `type`                  | yes       | Indicates the realm type. Must be set to `file`.
-| `order`                 | no        | Indicates the priority of this realm within the
-                                        realm chain. Realms with a lower order are
-                                        consulted first. Although not required, we
-                                        recommend explicitly setting this value when you
-                                        configure multiple realms. Defaults to
-                                        `Integer.MAX_VALUE`.
-| `enabled`              | no         | Indicates whether this realm is enabled or
-                                        disabled. Enables you to disable a realm without
-                                        removing its configuration. Defaults to `true`.
-| `cache.ttl`            | no         | Specifies the time-to-live for cached user entries.
-                                        A user's credentials are cached for this period of
-                                        time. Specify the time period using the standard
-                                        Elasticsearch {ref}/common-options.html#time-units[time units].
-                                        Defaults to `20m`.
-| `cache.max_users`      | no        	| Specifies the maximum number of user entries that
-                                        can be stored in the cache at one time. Defaults
-                                        to 100,000.
-| `cache.hash_algo`      | no        	| Specifies the hashing algorithm that is used for
-                                        the cached user credentials. See <> for the possible values.
-                                        (Expert Setting).
-|=======================
-
-==== A Look Under the Hood
-
-All the data about the users for the `file` realm is stored in two files, `users`
-and `users_roles`. Both files are located in `CONFIG_DIR/x-pack/` and are read
-on startup.
-
-By default, {security} checks these files for changes every 5 seconds. You can
-change this default behavior by changing the `resource.reload.interval.high` setting in
-the `elasticsearch.yml` file (as this is a common setting in Elasticsearch,
-changing its value may effect other schedules in the system).
-
-[IMPORTANT]
-==============================
-These files are managed locally by the node and are **not** managed
-globally by the cluster. This means that with a typical multi-node cluster,
-the exact same changes need to be applied on each and every node in the
-cluster.
-
-A safer approach would be to apply the change on one of the nodes and have the
-`users` and `users_roles` files distributed/copied to all other nodes in the
-cluster (either manually or using a configuration management system such as
-Puppet or Chef).
-==============================
-
-While it is possible to modify these files directly using any standard text
-editor, we strongly recommend using the {ref}/users-command.html[`bin/elasticsearch-users`]
-command-line tool to apply the required changes.
-
-[float]
-[[users-file]]
-===== The `users` File
-The `users` file stores all the users and their passwords. Each line in the
-`users` file represents a single user entry consisting of the username and
-**hashed** password.
-
-[source,bash]
-----------------------------------------------------------------------
-rdeniro:$2a$10$BBJ/ILiyJ1eBTYoRKxkqbuDEdYECplvxnqQ47uiowE7yGqvCEgj9W
-alpacino:$2a$10$cNwHnElYiMYZ/T3K4PvzGeJ1KbpXZp2PfoQD.gfaVdImnHOwIuBKS
-jacknich:$2a$10$GYUNWyABV/Ols/.bcwxuBuuaQzV6WIauW6RdboojxcixBq3LtI3ni
-----------------------------------------------------------------------
-
-NOTE: {security} uses `bcrypt` to hash the user passwords.
-
-[float]
-[[users_defining-roles]]
-==== The `users_roles` File
-
-The `users_roles` file stores the roles associated with the users, as in the
-following example:
-
-[source,shell]
---------------------------------------------------
-admin:rdeniro
-power_user:alpacino,jacknich
-user:jacknich
---------------------------------------------------
-
-Each row maps a role to a comma-separated list of all the users that are
-associated with that role.
+For more information, see 
+{ref}/configuring-file-realm.html[Configuring a file realm].
\ No newline at end of file
diff --git a/x-pack/docs/en/security/authentication/ldap-realm.asciidoc b/x-pack/docs/en/security/authentication/ldap-realm.asciidoc
index bd32c49622877..15b014183aa46 100644
--- a/x-pack/docs/en/security/authentication/ldap-realm.asciidoc
+++ b/x-pack/docs/en/security/authentication/ldap-realm.asciidoc
@@ -137,211 +137,13 @@ The `load_balance.type` setting can be used at the realm level to configure how
 {security} should interact with multiple LDAP servers. {security} supports both
 failover and load balancing modes of operation.
 
-.Load Balancing and Failover Types
-|=======================
-| Type              | | | Description
-| `failover`        | | | The URLs specified are used in the order that they are specified.
-                          The first server that can be connected to will be used for all
-                          subsequent connections. If a connection to that server fails then
-                          the next server that a connection can be established to will be
-                          used for subsequent connections.
-| `dns_failover`    | | | In this mode of operation, only a single URL may be specified.
-                          This URL must contain a DNS name. The system will be queried for
-                          all IP addresses that correspond to this DNS name. Connections to
-                          the LDAP server will always be tried in the order in which they
-                          were retrieved. This differs from `failover` in that there is no
-                          reordering of the list and if a server has failed at the beginning
-                          of the list, it will still be tried for each subsequent connection.
-| `round_robin`     | | | Connections will continuously iterate through the list of provided
-                          URLs. If a server is unavailable, iterating through the list of
-                          URLs will continue until a successful connection is made.
-| `dns_round_robin` | | | In this mode of operation, only a single URL may be specified. This
-                          URL must contain a DNS name. The system will be queried for all IP
-                          addresses that correspond to this DNS name. Connections will
-                          continuously iterate through the list of addresses. If a server is
-                          unavailable, iterating through the list of URLs will continue until
-                          a successful connection is made.
-|=======================
+See {ref}/security-settings.html#load-balancing[Load Balancing and Failover Settings].
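+
+For example, a hypothetical `ldap` realm configured to fail over between two
+servers:
+
+[source, yaml]
+------------------------------------------------------------
+xpack:
+  security:
+    authc:
+      realms:
+        ldap1:
+          type: ldap
+          order: 0
+          url: [ "ldaps://ldap1.example.com:636", "ldaps://ldap2.example.com:636" ]
+          load_balance:
+            type: failover
+------------------------------------------------------------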
 
 
 [[ldap-settings]]
 ===== LDAP Realm Settings
 
-.Common LDAP Realm Settings
-[cols="4,^3,10"]
-|=======================
-| Setting                        | Required | Description
-| `type`                         | yes      | Indicates the realm type. Must be set to `ldap`.
-| `order`                        | no       | Indicates the priority of this realm within the realm
-                                              chain. Realms with a lower order are consulted first.
-                                              Although not required, we recommend explicitly
-                                              setting this value when you configure multiple realms.
-                                              Defaults to `Integer.MAX_VALUE`.
-| `enabled`                      | no       | Indicates whether this realm is enabled or disabled.
-                                              Enables you to disable a realm without removing its
-                                              configuration. Defaults to `true`.
-| `url`                          | yes      | Specifies one or more LDAP URLs of the form of
-                                              `ldap[s]://:`. Multiple URLs can be
-                                              defined using a comma separated value or array syntax:
-                                              `[ "ldaps://server1:636", "ldaps://server2:636" ]`.
-                                              `ldaps` and `ldap` URL protocols cannot be mixed in
-                                              the same realm.
-| `load_balance.type`            | no       | The behavior to use when there are multiple LDAP URLs
-                                              defined. For supported values see
-                                              <>.
-| `load_balance.cache_ttl`       | no       | When using `dns_failover` or `dns_round_robin` as the
-                                              load balancing type, this setting controls the amount of time
-                                              to cache DNS lookups. Defaults to `1h`.
-| `user_group_attribute`         | no       | Specifies the attribute to examine on the user for group
-                                              membership. The default is `memberOf`. This setting will
-                                              be ignored if any `group_search` settings are specified.
-| `group_search.base_dn`         | no       | Specifies a container DN to search for groups in which
-                                              the user has membership. When this element is absent,
-                                              Security searches for the attribute specified by
-                                              `user_group_attribute` set on the user to determine
-                                              group membership.
-| `group_search.scope`           | no       | Specifies whether the group search should be
-                                              `sub_tree`, `one_level` or `base`.  `one_level` only
-                                              searches objects directly contained within the
-                                              `base_dn`. The default `sub_tree` searches all objects
-                                              contained under `base_dn`. `base` specifies that the
-                                              `base_dn` is a group object, and that it is the only
-                                              group considered.
-| `group_search.filter`          | no       | Specifies a filter to use to lookup a group. If not
-                                              set, the realm searches for `group`,
-                                              `groupOfNames`, `groupOfUniqueNames`, or `posixGroup` with the
-                                              attributes `member`, `memberOf`, or `memberUid`. Any instance of
-                                              `{0}` in the filter is replaced by the user
-                                              attribute defined in `group_search.user_attribute`
-| `group_search.user_attribute`  | no       | Specifies the user attribute that is fetched and
-                                              provided as a parameter to the filter.  If not set,
-                                              the user DN is passed to the filter.
-| `unmapped_groups_as_roles`     | no       | Specifies whether the names of any unmapped LDAP groups
-                                              should be used as role names and assigned to the user.
-                                              A group is considered to be _unmapped_ if it is not referenced
-                                              in any <> (API based
-                                              role-mappings are not considered).
-                                              Defaults to `false`.
-| `timeout.tcp_connect`          | no       | Specifies the TCP connect timeout period for establishing an
-                                              LDAP connection. An `s` at the end indicates seconds, or `ms`
-                                              indicates milliseconds. Defaults to `5s` (5 seconds).
-| `timeout.tcp_read`             | no       | Specifies the TCP read timeout period after establishing an LDAP connection.
-                                              An `s` at the end indicates seconds, or `ms` indicates milliseconds.
-                                              Defaults to `5s` (5 seconds).
-| `timeout.ldap_search`          | no       | Specifies the LDAP Server enforced timeout period for an LDAP search.
-                                              An `s` at the end indicates seconds, or `ms` indicates milliseconds.
-                                              Defaults to `5s` (5 seconds).
-| `files.role_mapping`           | no       | Specifies the path and file name for the
-                                              <>.
-                                              Defaults to `ES_HOME/config/x-pack/role_mapping.yml`.
-| `follow_referrals`             | no       | Specifies whether {security} should follow referrals
-                                              returned by the LDAP server. Referrals are URLs returned by
-                                              the server that are to be used to continue the LDAP operation
-                                              (e.g. search). Defaults to `true`.
-| `metadata`                     | no       | Specifies the list of additional LDAP attributes that should
-                                              be stored in the `metadata` of an authenticated user.
-| `ssl.key`                      | no       | Specifies the path to the PEM encoded private key to use if the LDAP
-                                              server requires client authentication. `ssl.key` and `ssl.keystore.path`
-                                              may not be used at the same time.
-| `ssl.key_passphrase`           | no       | Specifies the passphrase to decrypt the PEM encoded private key if it is encrypted.
-| `ssl.certificate`              | no       | Specifies the path to the PEM encoded certificate (or certificate chain) that goes with the
-                                              key if the LDAP server requires client authentication.
-| `ssl.certificate_authorities`  | no       | Specifies the paths to the PEM encoded certificate authority certificates that
-                                              should be trusted. `ssl.certificate_authorities` and `ssl.truststore.path` may not be used
-                                              at the same time.
-| `ssl.keystore.path`            | no       | The path to the Java Keystore file that contains a private key and certificate. `ssl.key` and
-                                              `ssl.keystore.path` may not be used at the same time.
-| `ssl.keystore.password`        | no       | The password to the keystore.
-| `ssl.keystore.key_password`    | no       | The password for the key in the keystore. Defaults to the keystore password.
-| `ssl.truststore.path`          | no       | The path to the Java Keystore file that contains the certificates to trust.
-                                              `ssl.certificate_authorities` and `ssl.truststore.path` may not be used at the same time.
-| `ssl.truststore.password`      | no       | The password to the truststore.
-| `ssl.verification_mode`        | no       | Specifies the type of verification to be performed when
-                                              connecting to a LDAP server using `ldaps`. When
-                                              set to `full`, the hostname or IP address used in the `url`
-                                              must match one of the names in the certificate or the
-                                              connection will not be allowed. Due to their potential security impact,
-                                              `ssl` settings are not exposed via the
-                                              {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API].
-                                              Values are `none`, `certificate`, and `full`. Defaults to `full`.
-                                              See {ref}/security-settings.html#ssl-tls-settings[`xpack.ssl.verification_mode`]
-                                              for an explanation of these values.
-| `ssl.supported_protocols`      | no       | Specifies the supported protocols for SSL/TLS.
-| `ssl.cipher_suites`            | no       | Specifies the cipher suites that should be supported when communicating
-                                              with the LDAP server.
-| `cache.ttl`                | no           | Specifies the time-to-live for cached user entries. A
-                                              user's credentials are cached for this period of time.
-                                              Specify the time period using the standard Elasticsearch
-                                              {ref}/common-options.html#time-units[time units].
-                                              Defaults to `20m`.
-| `cache.max_users`          | no           | Specifies the maximum number of user entries that can be
-                                              stored in the cache at one time. Defaults to 100,000.
-| `cache.hash_algo`          | no           | Specifies the hashing algorithm that is used for the
-                                              cached user credentials. See
-                                              <> for the possible
-                                              values. (Expert Setting).
-|=======================
-
-.User Search Mode Settings
-|=======================
-| Setting                                  | Required | Description
-| `bind_dn`                                | no       | The DN of the user that is used to bind to the LDAP
-                                                        and perform searches. If not specified, an anonymous
-                                                        bind is attempted. Due to its potential security
-                                                        impact, `bind_dn` is not exposed via the
-                                                        {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API].
-| `bind_password`                          | no       | The password for the user that is used to bind to the
-                                                        LDAP directory. Due to its potential security impact,
-                                                        `bind_password` is not exposed via the
-                                                        {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API].
-                                                        *Deprecated.* Use `secure_bind_password` instead. 
-| `secure_bind_password`                   | no       | ({ref}/secure-settings.html[Secure])
-                                                        The password for the user that is used to bind to LDAP directory.
-| `user_search.base_dn`                    | yes      | Specifies a container DN to search for users.
-| `user_search.scope`                      | no       | The scope of the user search. Valid values are `sub_tree`,
-                                                        `one_level` or `base`. `one_level` only searches objects
-                                                        directly contained within the `base_dn`. `sub_tree` searches
-                                                        all objects contained under `base_dn`. `base` specifies
-                                                        that the `base_dn` is the user object, and that it is the
-                                                        only user considered. Defaults to `sub_tree`.
-| `user_search.filter`                     | no       | Specifies the filter used to search the directory in attempt to match
-                                                        an entry with the username provided by the user. Defaults to `(uid={0})`.
-                                                        `{0}` is substituted with the username provided when searching.
-| `user_search.attribute`                  | no       | This setting is deprecated; use `user_search.filter` instead.
-                                                        Specifies the attribute to match with the username presented
-                                                        to. Defaults to `uid`.
-| `user_search.pool.enabled`               | no       | Enables or disables connection pooling for user search. When
-                                                        disabled a new connection is created for every search. The
-                                                        default is `true`.
-| `user_search.pool.size`                  | no       | Specifies the maximum number of connections to the LDAP
-                                                        server to allow in the connection pool. Defaults to `20`.
-| `user_search.pool.initial_size`          | no       | The initial number of connections to create to the LDAP
-                                                        server on startup. Defaults to `0`. Values greater than `0`
-                                                        could cause startup failures if the LDAP server is down.
-| `user_search.pool.health_check.enabled`  | no       | Enables or disables a health check on LDAP connections in
-                                                        the connection pool. Connections are checked in the
-                                                        background at the specified interval. Defaults to `true`.
-| `user_search.pool.health_check.dn`       | no/yes   | Specifies the distinguished name to retrieve as part of
-                                                        the health check. Defaults to the value of `bind_dn`.
-                                                        This setting is required when `bind_dn` is not configured.
-| `user_search.pool.health_check.interval` | no       | How often to perform background checks of connections in
-                                                        the pool. Defaults to `60s`.
-|=======================
-
-.User Templates Mode Settings
-[cols="4,^3,10"]
-|=======================
-| Setting               | Required  | Description
-| `user_dn_templates`   | yes       | Specifies the DN template that replaces the
-                                      user name with the string `{0}`. This element
-                                      is multivalued, allowing for multiple user
-                                      contexts.
-|=======================
-
-
-NOTE:   If any settings starting with `user_search` are specified, the
-        `user_dn_templates` the settings are ignored.
-
+See {ref}/security-settings.html#ref-ldap-settings[LDAP realm settings].
 
 [[mapping-roles-ldap]]
 ==== Mapping LDAP Groups to Roles
diff --git a/x-pack/docs/en/security/authentication/native-realm.asciidoc b/x-pack/docs/en/security/authentication/native-realm.asciidoc
index 8cd150b9c1c99..1c3afdacdc5c1 100644
--- a/x-pack/docs/en/security/authentication/native-realm.asciidoc
+++ b/x-pack/docs/en/security/authentication/native-realm.asciidoc
@@ -1,5 +1,5 @@
 [[native-realm]]
-=== Native User Authentication
+=== Native user authentication
 
 The easiest way to manage and authenticate users is with the internal `native`
 realm. You can use the REST APIs or Kibana to add and remove users, assign user roles, and
@@ -7,7 +7,7 @@ manage user passwords.
 
 [[native-realm-configuration]]
 [float]
-==== Configuring a Native Realm
+==== Configuring a native realm
 
 The native realm is added to the realm chain by default. You don't need to
 explicitly configure a native realm to manage users through the REST APIs.
@@ -47,45 +47,12 @@ xpack:
 . Restart Elasticsearch.
 
 [[native-settings]]
-.Native Realm Settings
-[cols="4,^3,10"]
-|=======================
-| Setting             | Required  | Description
-
-| `type`              | yes       | Indicates the realm type. Must be set to `native`.
-
-| `order`             | no        | Indicates the priority of this realm within
-                                    the realm chain. Realms with a lower order
-                                    are consulted first. Although not required,
-                                    we recommend explicitly setting this value
-                                    when you configure multiple realms. Defaults
-                                    to `Integer.MAX_VALUE`.
-
-| `enabled`           | no        | Indicates whether this realm is enabled or
-                                    disabled. When set to `false`, the realm is
-                                    not added to the realm chain and therefore
-                                    is inactive. Defaults to `true`.
-
-| `cache.ttl`         | no        | Specifies the time-to-live for cached user
-                                    entries. A user's credentials are cached for
-                                    this period of time. Specify the time period
-                                    using the standard Elasticsearch
-                                    {ref}/common-options.html#time-units[time units].
-                                    Defaults to `20m`.
-
-| `cache.max_users`   | no        | Specifies the maximum number of user entries
-                                    that can be cached at any given time. Defaults
-                                    to 100,000.
-
-| `cache.hash_algo`   | no        | Specifies the hashing algorithm that is used
-                                    for the cached user credentials. See
-                                    <>
-                                    for the possible values. (Expert Setting)
-|=======================
+==== Native realm settings
 
+See {ref}/security-settings.html#ref-native-settings[Native realm settings].
 
 [[managing-native-users]]
-==== Managing Native Users
+==== Managing native users
 
 {security} enables you to easily manage users in {kib} on the 
 *Management / Security / Users* page. 
diff --git a/x-pack/docs/en/security/authentication/pki-realm.asciidoc b/x-pack/docs/en/security/authentication/pki-realm.asciidoc
index 57cf4dbbce090..47f9670d840aa 100644
--- a/x-pack/docs/en/security/authentication/pki-realm.asciidoc
+++ b/x-pack/docs/en/security/authentication/pki-realm.asciidoc
@@ -1,5 +1,5 @@
 [[pki-realm]]
-=== PKI User Authentication
+=== PKI user authentication
 
 You can configure {security} to use Public Key Infrastructure (PKI) certificates
 to authenticate users in {es}. This requires clients to present X.509
@@ -12,174 +12,9 @@ the desired network layers (transport or http), and map the Distinguished Names
 (DNs) from the user certificates to {security} roles in the
 <>.
 
-You can also use a combination of PKI and username/password authentication. For
-example, you can enable SSL/TLS on the transport layer and define a PKI realm to
-require transport clients to authenticate with X.509 certificates, while still
-authenticating HTTP traffic using username and password credentials. You can also set
-`xpack.security.transport.ssl.client_authentication` to `optional` to allow clients without
-certificates to authenticate with other credentials.
-
-IMPORTANT:  You must enable SSL/TLS and enabled client authentication to use PKI.
-            For more information, see <>.
-
-==== PKI Realm Configuration
-
-Like other realms, you configure options for a `pki` realm under the
-`xpack.security.authc.realms` namespace in `elasticsearch.yml`.
-
-To configure a `pki` realm:
-
-. Add a realm configuration of type `pki` to `elasticsearch.yml` under the
-`xpack.security.authc.realms` namespace. At a minimum, you must set the realm `type` to
-`pki`. If you are configuring multiple realms, you should also explicitly set
-the `order` attribute. See <> for all of the options you can set
-for a `pki` realm.
-+
-For example, the following snippet shows the most basic `pki` realm configuration:
-+
-[source, yaml]
-------------------------------------------------------------
-xpack:
-  security:
-    authc:
-      realms:
-        pki1:
-          type: pki
-------------------------------------------------------------
-+
-With this configuration, any certificate trusted by the SSL/TLS layer is accepted
-for authentication. The username is the common name (CN) extracted from the DN
-of the certificate.
-+
-IMPORTANT: When you configure realms in `elasticsearch.yml`, only the
-realms you specify are used for authentication. If you also want to use the
-`native` or `file` realms, you must include them in the realm chain.
-+
-If you want to use something other than the CN of the DN as the username, you
-can specify a regex to extract the desired username. For example, the regex in
-the following configuration extracts the email address from the DN:
-+
-[source, yaml]
-------------------------------------------------------------
-xpack:
-  security:
-    authc:
-      realms:
-        pki1:
-          type: pki
-          username_pattern: "EMAILADDRESS=(.*?)(?:,|$)"
-------------------------------------------------------------
-+
-. Restart Elasticsearch.
-
-[[pki-ssl-config]]
-==== PKI and SSL Settings
-
-The PKI realm relies on the SSL settings of the node's network interface
-(transport or http). The realm can be configured to be more restrictive than
-the underlying network connection - that is, it is possible to configure the
-node such that some connections are accepted by the network interface but then
-fail to be authenticated by the PKI realm. However the reverse is not possible
-- the PKI realm cannot authenticate a connection that has been refused by the
-network interface.
-
-In particular this means:
-
-* The transport or http interface must request client certificates by setting
-  `client_authentication` to `optional` or `required`.
-* The interface must _trust_ the certificate that is presented by the client
-  by configuring either the `truststore` or `certificate_authorities` paths,
-  or by setting `verification_mode` to `none`.
-+
-See {ref}/security-settings.html#ssl-tls-settings[`xpack.ssl.verification_mode`]
-for an explanation of this setting.
-
-* The _protocols_ supported by the interface must be compatible with those
-  used by the client.
-
-
-The relevant network interface (transport or http) must be configured to trust
-any certificate that is to be used within the PKI realm. However it possible to
-configure the PKI realm to trust only a _subset_ of the certificates accepted
-by the network interface.
-This is useful when the SSL/TLS layer trusts clients with certificates that are
-signed by a different CA than the one that signs your users' certificates.
-
-To configure the PKI realm with its own truststore, specify the
-`truststore.path` option as below:
-
-[source, yaml]
-------------------------------------------------------------
-xpack:
-  security:
-    authc:
-      realms:
-        pki1:
-          type: pki
-          truststore:
-            path: "/path/to/pki_truststore.jks"
-            password: "x-pack-test-password"
-------------------------------------------------------------
-
-The `certificate_authorities` option may be used as an alternative to the
-`truststore.path` setting.
-
+See {ref}/configuring-pki-realm.html[Configuring a PKI realm].
 
 [[pki-settings]]
-===== PKI Realm Settings
-
-See
-{ref}/security-settings.html#_settings_valid_for_all_realms[Security Settings for All Realms]
-and
-{ref}/security-settings.html#ref-pki-settings[PKI Realm Settings].
-
-[[assigning-roles-pki]]
-==== Mapping Roles for PKI Users
-
-You map roles for PKI users through the
-{ref}/security-api-role-mapping.html[role-mapping API], or by using a file stored on
-each node. When a user authenticates against a PKI realm, the privileges for
-that user are the union of all privileges defined by the roles to which the
-user is mapped.
-
-You identify a user by the distinguished name in their certificate.
-For example, the following mapping configuration maps `John Doe` to the
-`user` role:
-
-Using the role-mapping API:
-[source,js]
---------------------------------------------------
-PUT _xpack/security/role_mapping/users
-{
-  "roles" : [ "user" ],
-  "rules" : { "field" : {
-    "dn" : "cn=John Doe,ou=example,o=com" <1>
-  } },
-  "enabled": true
-}
---------------------------------------------------
-// CONSOLE
-<1> The distinguished name (DN) of a PKI user.
-
-Or, alternatively, configured in a role-mapping file:
-[source, yaml]
-------------------------------------------------------------
-user: <1>
-  - "cn=John Doe,ou=example,o=com" <2>
-------------------------------------------------------------
-<1> The name of a role.
-<2> The distinguished name (DN) of a PKI user.
-
-The disinguished name for a PKI user follows X.500 naming conventions which
-place the most specific fields (like `cn` or `uid`) at the beginning of the
-name, and the most general fields (like `o` or `dc`) at the end of the name.
-Some tools, such as _openssl_, may print out the subject name in a different
- format.
-
-One way that you can determine the correct DN for a certificate is to use the
-{ref}/security-api-authenticate.html[authenticate API] (use the relevant PKI
-certificate as the means of authentication) and inspect the metadata field in
-the result. The user's distinguished name will be populated under the `pki_dn`
-key. You can also use the authenticate API to validate your role mapping.
+==== PKI realm settings
 
-For more information, see <>.
+See {ref}/security-settings.html#ref-pki-settings[PKI realm settings].
diff --git a/x-pack/docs/en/security/authentication/saml-realm.asciidoc b/x-pack/docs/en/security/authentication/saml-realm.asciidoc
index 4de8d5a28ce3e..bbf7d597b30ee 100644
--- a/x-pack/docs/en/security/authentication/saml-realm.asciidoc
+++ b/x-pack/docs/en/security/authentication/saml-realm.asciidoc
@@ -25,238 +25,19 @@ for SAML realms.
 [[saml-settings]]
 ==== SAML Realm Settings
 
-[cols="4,^3,10"]
-|=======================
-| Setting                     | Required | Description
-| `type`                      | yes      | Indicates the realm type. Must be set to `saml`.
-| `order`                     | no       | Indicates the priority of this realm within the realm chain.
-                                           Realms with a lower order are consulted first. Although not
-                                           required, we recommend explicitly setting this value when
-                                           you configure multiple realms. Defaults to `Integer.MAX_VALUE`.
-| `enabled`                   | no       | Indicates whether this realm is enabled or disabled. Enables
-                                           you to disable a realm without removing its configuration.
-                                           Defaults to `true`.
-| `idp.entity_id`             | yes      | The Entity ID of the SAML Identity Provider. An Entity ID is
-                                           a URI with a maximum length of 1024 characters. It can be a
-                                           URL (`https://idp.example.com/`) or a URN (`urn:example.com:idp`)
-                                           and can be found in the configuration or the SAML metadata
-                                           of the Identity Provider.
-| `idp.metadata.path`         | yes      | The path (_recommended_) or URL to a SAML 2.0 metadata file
-                                           describing the capabilities and configuration of the Identity
-                                           Provider. 
-                                           If a path is provided, then it is resolved relative to the
-                                           {es} config directory.
-                                           If a URL is provided, then it must be either a `file` URL or
-                                           a `https` URL.
-                                           {security} will automatically poll this metadata resource and
-                                           will reload the IdP configuration when changes are detected.
-                                           File based resources are polled at a frequency determined by
-                                           the global {es} `resource.reload.interval.high` setting, which
-                                           defaults to 5 seconds.
-                                           HTTPS resources are polled at a frequency determined by
-                                           the realm's `idp.metadata.http.refresh` setting.
-| `idp.metadata.http.refresh` | no       | Controls the frequency with which `https` metadata is checked
-                                           for changes. Defaults to 1 hour.
-| `idp.use_single_logout`     | no       | Indicates whether to utilise the Identity Provider's Single
-                                           Logout service (if one exists in the IdP metadata file).
-                                           Defaults to `true`.
-| `sp.entity_id`              | yes      | The Entity ID to use for this SAML Service Provider.
-                                           This should be entered as a URI. We recommend that you use the
-                                           base URL of your {kib} instance,
-                                           e.g. `https://kibana.example.com/`
-| `sp.acs`                    | yes      | The URL of the Assertion Consumer Service within {kib}.
-                                           Typically this will be the "api/security/v1/saml" endpoint of
-                                           your {kib} server,
-                                           e.g. `https://kibana.example.com/api/security/v1/saml`
-| `sp.logout`                 | no       | The URL of the Single Logout service within {kib}.
-                                           Typically this will be the "logout" endpoint of
-                                           your {kib} server,
-                                           e.g. `https://kibana.example.com/logout`
-| `attributes.principal`      | yes      | The Name of the SAML attribute that should be used as the
-                                           {security} user's principal (username)
-| `attributes.groups`         | no       | The Name of the SAML attribute that should be used to populate
-                                           {security} user's groups
-| `attributes.name`           | no       | The Name of the SAML attribute that should be used to populate
-                                           {security} user's full name
-| `attributes.mail`           | no       | The Name of the SAML attribute that should be used to populate
-                                           {security} user's email address
-| `attributes.dn`             | no       | The Name of the SAML attribute that should be used to populate
-                                           {security} user's X.500 _Distinguished Name_
-| `attribute_patterns.principal` | no    | A java regular expression that is matched against the SAML attribute
-                                           specified by `attributes.pattern` before it is applied to the user's
-                                           _principal_ property.
-                                           The attribute value must match the pattern, and the value of the
-                                           first _capturing group_ is used as the principal.
-                                           e.g. `^([^@]+)@example\\.com$` matches email addresses from the
-                                           "example.com" domain and uses the local-part as the principal.
-| `attribute_patterns.groups`    | no    | As per `attribute_patterns.principal`, but for the _group_ property.
-| `attribute_patterns.name`      | no    | As per `attribute_patterns.principal`, but for the _name_ property.
-| `attribute_patterns.mail`      | no    | As per `attribute_patterns.principal`, but for the _mail_ property.
-| `attribute_patterns.dn`        | no    | As per `attribute_patterns.principal`, but for the _dn_ property.
-| `nameid_format`             | no       | The NameID format that should be requested when asking the IdP
-                                           to authenticate the current user.
-                                           Defaults to requesting _transient_ names
-                                           (`urn:oasis:names:tc:SAML:2.0:nameid-format:transient`)
-| `nameid.allow_create`       | no       | The value of the `AllowCreate` attribute of the `NameIdPolicy`
-                                           element in an authentication request.
-                                           Defaults to `false`
-| `nameid.sp_qualifier`       | no       | The value of the `SPNameQualifier` attribute of the `NameIdPolicy`
-                                           element in an authentication request.
-                                           The default is to not include the `SPNameQualifier` attribute.
-| `force_authn`               | no       | Whether to set the `ForceAuthn` attribute when requesting that the
-                                           IdP authenticate the current user. If this is set to `true`, the
-                                           IdP will be required to freshly establish the user's identity,
-                                           irrespective of any exiting sessions they may have.
-                                           Defaults to `false`.
-| `populate_user_metadata`    | no       | Whether to populate the {es} user's metadata with the values that
-                                           are provided by the SAML attributes. Defaults to `true`.
-| `allowed_clock_skew`        | no       | The maximum amount of skew that can be tolerated between the
-                                           IdP's clock and the {es} node's clock. Defaults to 3 minutes.
-|=======================
+See {ref}/security-settings.html#ref-saml-settings[SAML Realm Settings]. 
+
 
 ===== SAML Realm Signing Settings
 
-If a signing key is configured (i.e. is one of `signing.key` or `signing.keystore.path` has been set), then
-{security} will sign outgoing SAML messages. Signing can be configured using the following settings.
+See {ref}/security-settings.html#ref-saml-signing-settings[SAML Realm Signing Settings]. 
 
-|=======================
-| Setting                           | Required | Description
-| `signing.saml_messages`           | no       | A list of SAML message types that should be signed, or `*` to
-                                                 sign all messages. Each element in the list should be the 
-                                                 local name of a SAML XML Element.  Supported element types are
-                                                 `AuthnRequest`, `LogoutRequest` and `LogoutResponse`.
-                                                 Defaults to `*`.
-| `signing.key`                     | no       | Specifies the path to the PEM encoded private key to use for 
-                                                 SAML message signing.
-                                                 `signing.key` and `signing.keystore.path` may not be used at
-                                                  the same time.
-| `signing.secure_key_passphrase`   | no       | ({ref}/secure-settings.html[Secure])
-                                                 Specifies the passphrase to decrypt the PEM encoded private key if
-                                                 it is encrypted.
-| `signing.certificate`             | no       | Specifies the path to the PEM encoded certificate (or certificate
-                                                 chain) that corresponds to the `signing.key`.  This certificate
-                                                 must also be included in the Service Provider metadata, or
-                                                 manually configured within the IdP to allow for signature
-                                                 validation.
-                                                 May only be used if `signing.key` is set.
-| `signing.keystore.path`           | no       | The path to the keystore that contains a private key and
-                                                 certificate.
-                                                 Must be either a Java Keystore (jks) or a PKCS#12 file.
-                                                 `signing.key` and `signing.keystore.path` may not be used at the
-                                                 same time.
-| `signing.keystore.type`           | no       | The type of the keystore. Must be one of "jks" or "PKCS12".
-                                                 Defaults to "PKCS12" if the keystore path ends in ".p12", ".pfx" or
-                                                 "pkcs12", otherwise uses "jks"
-| `signing.keystore.alias`          | no       | Specifies the alias of the key within the keystore that should be
-                                                 used for SAML message signing. Must be specified if the keystore
-                                                 contains more than one private key.
-| `signing.keystore.secure_password` | no      | ({ref}/secure-settings.html[Secure]) The password to the keystore.
-| `signing.keystore.secure_key_password` | no  | ({ref}/secure-settings.html[Secure])
-                                                 The password for the key in the keystore.
-                                                 Defaults to the keystore password.
-|=======================
 
 ===== SAML Realm Encryption Settings
 
-If an encryption key is configured (i.e. is one of `encryption.key` or
-`encryption.keystore.path` has been set), then {security} will publish
-an encryption certificate when generating metadata, and will attempt to
-decrypt incoming SAML content.
-Encryption can be configured using the following settings.
-
-|=======================
-| Setting                             | Required | Description
-| `encryption.key`                    | no       | Specifies the path to the PEM encoded private key to use for 
-                                                   SAML message descryption.
-                                                   `encryption.key` and `encryption.keystore.path` may not be used at
-                                                    the same time.
-| `encryption.secure_key_passphrase`  | no       | ({ref}/secure-settings.html[Secure])
-                                                   Specifies the passphrase to decrypt the PEM encoded private key if
-                                                   it is encrypted.
-| `encryption.certificate`            | no       | Specifies the path to the PEM encoded certificate (or certificate
-                                                   chain) that is associated with the `encryption.key`. This
-                                                   certificate must also be included in the Service Provider metadata,
-                                                   or manually configured within the IdP to enable message encryption.
-                                                   May only be used if `encryption.key` is set.
-| `encryption.keystore.path`          | no       | The path to the keystore that contains a private key and
-                                                   certificate.
-                                                   Must be either a Java Keystore (jks) or a PKCS#12 file.
-                                                   `encryption.key` and `encryption.keystore.path` may not be used at
-                                                   the same time.
-| `encryption.keystore.type`          | no       | The type of the keystore. Must be one of "jks" or "PKCS12".
-                                                   Defaults to "PKCS12" if the keystore path ends in ".p12", ".pfx" or
-                                                   "pkcs12", otherwise uses "jks"
-| `encryption.keystore.alias`       | no         | Specifies the alias of the key within the keystore that should be
-                                                   used for SAML message decryption. If not specified, all compatible
-                                                   key pairs from the keystore will be considered as candidate keys
-                                                   for decryption.
-| `encryption.keystore.secure_password` | no     | ({ref}/secure-settings.html[Secure]) The password to the keystore.
-| `encryption.keystore.secure_key_password` | no | ({ref}/secure-settings.html[Secure])
-                                                   The password for the key in the keystore. Only a single password is
-                                                   supported. If you are using multiple decryption keys, then they
-                                                   cannot have individual passwords.
-|=======================
+See {ref}/security-settings.html#ref-saml-encryption-settings[SAML Realm Encryption Settings]. 
 
 ===== SAML Realm SSL Settings
 
-If you are loading the IdP metadata over SSL/TLS (that is, `idp.metadata.path` is a URL using the `https` protocol)
-Then the following settings may be used to configure SSL. If these are not specified, then the {xpack}
-{ref}/security-settings.html#ssl-tls-settings[default SSL settings] are used.
-
-These settings are not used for any purpose other than loading metadata over https.
-
-|=======================
-| Setting                         | Required | Description
-| `ssl.key`                       | no       | Specifies the path to the PEM encoded private key to use for http
-                                               client authentication.
-                                               `ssl.key` and `ssl.keystore.path` may not be used at the same time.
-| `ssl.key_passphrase`            | no       | Specifies the passphrase to decrypt the PEM encoded private key if
-                                               it is encrypted. May not be used with `ssl.secure_key_passphrase`
-| `ssl.secure_key_passphrase`     | no       | ({ref}/secure-settings.html[Secure])
-                                               Specifies the passphrase to decrypt the PEM encoded private key if
-                                               it is encrypted. May not be used with `ssl.key_passphrase`
-| `ssl.certificate`               | no       | Specifies the path to the PEM encoded certificate (or certificate
-                                               chain) that goes with the key. May only be used if `ssl.key` is set.
-| `ssl.certificate_authorities`   | no       | Specifies the paths to the PEM encoded certificate authority
-                                               certificates that should be trusted.
-                                               `ssl.certificate_authorities` and `ssl.truststore.path` may not be
-                                               used at the same time.
-| `ssl.keystore.path`             | no       | The path to the keystore that contains a private key and
-                                               certificate.
-                                               Must be either a Java Keystore (jks) or a PKCS#12 file.
-                                               `ssl.key` and `ssl.keystore.path` may not be used at the same time.
-| `ssl.keystore.type`             | no       | The type of the keystore. Must be one of "jks" or "PKCS12".
-                                               Defaults to "PKCS12" if the keystore path ends in ".p12", ".pfx" or
-                                               "pkcs12", otherwise uses "jks"
-| `ssl.keystore.password`         | no       | The password to the keystore.
-                                               May not be used with `ssl.keystore.secure_password`.
-| `ssl.keystore.secure_password`  | no       | ({ref}/secure-settings.html[Secure]) The password to the keystore.
-                                               May not be used with `ssl.keystore.password`.
-| `ssl.keystore.key_password`     | no       | The password for the key in the keystore.
-                                               Defaults to the keystore password.
-                                               May not be used with `ssl.keystore.secure_key_password`.
-| `ssl.keystore.secure_key_password` | no    | ({ref}/secure-settings.html[Secure])
-                                               The password for the key in the keystore.
-                                               Defaults to the keystore password.
-                                               May not be used with `ssl.keystore.key_password`.
-| `ssl.truststore.path`           | no       | The path to the keystore that contains the certificates to trust.
-                                               Must be either a Java Keystore (jks) or a PKCS#12 file.
-                                               `ssl.certificate_authorities` and `ssl.truststore.path` may not be
-                                               used at the same time.
-| `ssl.truststore.type`           | no       | The type of the truststore. Must be one of "jks" or "PKCS12".
-                                               Defaults to "PKCS12" if the keystore path ends in ".p12", ".pfx" or
-                                               "pkcs12", otherwise uses "jks"
-| `ssl.truststore.password`       | no       | The password to the truststore.
-                                               May not be used with `ssl.truststore.secure_password`.
-| `ssl.truststore.secure_password` | no      | ({ref}/secure-settings.html[Secure]) The password to the truststore.
-                                               May not be used with `ssl.truststore.password`.
-| `ssl.verification_mode`         | no       | One of `full` (verify the hostname and the certicate path),
-                                               `certificate` (verify the certificate path, but not the hostname) 
-                                               or `none` (perform no verification).  Defaults to `full`.
-+
-                                               See {ref}/security-settings.html#ssl-tls-settings[`xpack.ssl.verification_mode`]
-                                               for a more detailed explanation of these values.
-| `ssl.supported_protocols`       | no       | Specifies the supported protocols for TLS/SSL.
-| `ssl.cipher_suites`             | no       | Specifies the cipher suites that should be supported.
-|=======================
+See {ref}/security-settings.html#ref-saml-ssl-settings[SAML Realm SSL Settings]. 
 
diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc
index aab00fb225fd6..114fd1cdc4f15 100644
--- a/x-pack/docs/en/security/configuring-es.asciidoc
+++ b/x-pack/docs/en/security/configuring-es.asciidoc
@@ -1,6 +1,6 @@
 [role="xpack"]
 [[configuring-security]]
-== Configuring Security in {es}
+== Configuring security in {es}
 ++++
 Configuring Security
 ++++
@@ -70,6 +70,11 @@ user API.
 
 --
 
+. Choose which types of realms you want to use to authenticate users. 
+** <>.
+** <>.
+** <>.
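++
+--
+For illustration only (the realm names here are hypothetical), a chain that 
+combines a native realm with an LDAP realm might look like this in 
+`elasticsearch.yml`:
+
+[source,yaml]
+------------------------------------------------------------
+xpack.security.authc.realms:
+  native1:
+    type: native
+    order: 0
+  ldap1:
+    type: ldap
+    order: 1
+    url: "ldaps://ldap.example.com:636"
+------------------------------------------------------------
+--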
+
 . Set up roles and users to control access to {es}.
 For example, to grant _John Doe_ full access to all indices that match
 the pattern `events*` and enable him to create visualizations and dashboards
@@ -128,5 +133,8 @@ include::securing-communications/securing-elasticsearch.asciidoc[]
 include::securing-communications/configuring-tls-docker.asciidoc[]
 include::securing-communications/enabling-cipher-suites.asciidoc[]
 include::securing-communications/separating-node-client-traffic.asciidoc[]
+include::authentication/configuring-active-directory-realm.asciidoc[]
+include::authentication/configuring-file-realm.asciidoc[]
+include::authentication/configuring-pki-realm.asciidoc[]
 include::{xes-repo-dir}/settings/security-settings.asciidoc[]
 include::{xes-repo-dir}/settings/audit-settings.asciidoc[]
diff --git a/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc b/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc
index b100567edf8b9..e5c1187264f7c 100644
--- a/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc
+++ b/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc
@@ -20,9 +20,13 @@ information, see <>.
 .. Required: <>.
 .. Recommended: <>.
 
+. If you are using Active Directory user authentication, 
+<>. 
+
 For more information about encrypting communications across the Elastic Stack,
 see {xpack-ref}/encrypting-communications.html[Encrypting Communications].
 
 include::node-certificates.asciidoc[]
 include::tls-transport.asciidoc[]
 include::tls-http.asciidoc[]
+include::tls-ad.asciidoc[]
diff --git a/x-pack/docs/en/security/securing-communications/tls-ad.asciidoc b/x-pack/docs/en/security/securing-communications/tls-ad.asciidoc
new file mode 100644
index 0000000000000..d189501f1e2a5
--- /dev/null
+++ b/x-pack/docs/en/security/securing-communications/tls-ad.asciidoc
@@ -0,0 +1,57 @@
+[role="xpack"]
+[[tls-active-directory]]
+==== Encrypting communications between {es} and Active Directory
+
+To protect the user credentials that are sent for authentication, it's highly
+recommended to encrypt communications between {es} and your Active Directory 
+server. Connecting via SSL/TLS ensures that the identity of the Active Directory 
+server is authenticated before {security} transmits the user credentials and 
+that the usernames and passwords are encrypted in transit. 
+
+Clients and nodes that connect via SSL/TLS to the Active Directory server need 
+to have the Active Directory server's certificate or the server's root CA 
+certificate installed in their keystore or truststore. 
+
+. Create the realm configuration for the `xpack.security.authc.realms` namespace 
+in the `elasticsearch.yml` file. See <>. 
+
+. Set the `url` attribute in the realm configuration to specify the LDAPS protocol
+and the secure port number. For example, `url: ldaps://ad.example.com:636`.
+
+. Configure each node to trust certificates signed by the certificate authority 
+(CA) that signed your Active Directory server certificates. 
++
+--
+The following example demonstrates how to trust a CA certificate (`cacert.pem`), 
+which is located within the configuration directory:
+
+[source,yaml]
+--------------------------------------------------
+xpack:
+  security:
+    authc:
+      realms:
+        active_directory:
+          type: active_directory
+          order: 0
+          domain_name: ad.example.com
+          url: ldaps://ad.example.com:636
+          ssl:
+            certificate_authorities: [ "CONFIG_DIR/cacert.pem" ]
+--------------------------------------------------
+
+The CA certificate must be PEM encoded.
+
+For more information about these settings, see <>. 
+--
+
+. Restart {es}.
+
+NOTE: By default, when you configure {security} to connect to Active Directory
+      using SSL/TLS, {security} attempts to verify the hostname or IP address
+      specified with the `url` attribute in the realm configuration against the
+      values in the certificate. If the values in the certificate and realm
+      configuration do not match, {security} does not allow a connection to the
+      Active Directory server. This is done to protect against man-in-the-middle
+      attacks. If necessary, you can disable this behavior by setting the 
+      `ssl.verification_mode` property to `certificate`.
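+
+As a sketch, extending the example realm above, relaxing hostname verification 
+would look like this:
+
+[source,yaml]
+--------------------------------------------------
+xpack:
+  security:
+    authc:
+      realms:
+        active_directory:
+          ssl:
+            verification_mode: certificate
+--------------------------------------------------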
diff --git a/x-pack/docs/en/security/troubleshooting.asciidoc b/x-pack/docs/en/security/troubleshooting.asciidoc
index 35d4d5a4e555e..79d541d74b17b 100644
--- a/x-pack/docs/en/security/troubleshooting.asciidoc
+++ b/x-pack/docs/en/security/troubleshooting.asciidoc
@@ -88,15 +88,15 @@ the users. Any unknown roles are marked with `*`.
 --
 [source, shell]
 ------------------------------------------
-bin/xpack/users list
+bin/elasticsearch-users list
 rdeniro        : admin
 alpacino       : power_user
 jacknich       : monitoring,unknown_role* <1>
 ------------------------------------------
 <1> `unknown_role` was not found in `roles.yml`
 
-For more information about this command, see
-{ref}/users-command.html[Users Command].
+For more information about this command, see the 
+{ref}/users-command.html[`elasticsearch-users` command].
 --
 
 . If you are authenticating to LDAP, a number of configuration options can cause
diff --git a/x-pack/docs/en/settings/security-settings.asciidoc b/x-pack/docs/en/settings/security-settings.asciidoc
index cb74babc0244f..8f561401095c2 100644
--- a/x-pack/docs/en/settings/security-settings.asciidoc
+++ b/x-pack/docs/en/settings/security-settings.asciidoc
@@ -1,8 +1,8 @@
 [role="xpack"]
 [[security-settings]]
-=== Security Settings in Elasticsearch
+=== Security settings in {es}
 ++++
-Security Settings
+Security settings
 ++++
 
 By default, {security} is disabled when you have a basic or trial license. To
@@ -23,14 +23,14 @@ For more information about creating and updating the {es} keystore, see
 
 [float]
 [[general-security-settings]]
-==== General Security Settings
+==== General security settings
 `xpack.security.enabled`::
 Set to `true` to enable {security} on the node. +
 +
 If set to `false`, which is the default value for basic and trial licenses,
 {security} is disabled. It also affects all {kib} instances that connect to this
 {es} instance; you do not need to disable {security} in those `kibana.yml` files.
-For more information about disabling {security} in specific {kib} instances, see  {kibana-ref}/security-settings-kb.html[{kib} Security Settings].
+For more information about disabling {security} in specific {kib} instances, see {kibana-ref}/security-settings-kb.html[{kib} security settings].
 
 `xpack.security.hide_settings`::
 A comma-separated list of settings that are omitted from the results of the
@@ -42,16 +42,16 @@ sensitive nature of the information.
 
 [float]
 [[password-security-settings]]
-==== Default Password Security Settings
+==== Default password security settings
 `xpack.security.authc.accept_default_password`::
 In `elasticsearch.yml`, set this to `false` to disable support for the default "changeme" password.
 
 [float]
 [[anonymous-access-settings]]
-==== Anonymous Access Settings
-
-For more information, see {xpack-ref}/anonymous-access.html[
-Enabling Anonymous Access].
+==== Anonymous access settings
+You can configure the following anonymous access settings in
+`elasticsearch.yml`. For more information, see {xpack-ref}/anonymous-access.html[
+Enabling anonymous access].
 
 `xpack.security.authc.anonymous.username`::
 The username (principal) of the anonymous user. Defaults to `_es_anonymous_user`.
@@ -69,12 +69,12 @@ access. Defaults to `true`.
 
 [float]
 [[field-document-security-settings]]
-==== Document and Field Level Security Settings
+==== Document and field level security settings
 
 You can set the following document and field level security
 settings in `elasticsearch.yml`. For more information, see
-{xpack-ref}/field-and-document-access-control.html[Setting Up Document and Field
-Level Security].
+{xpack-ref}/field-and-document-access-control.html[Setting up document and field
+level security].
 
 `xpack.security.dls_fls.enabled`::
 Set to `false` to prevent document and field level security
@@ -82,7 +82,7 @@ from being configured. Defaults to `true`.
 
 [float]
 [[token-service-settings]]
-==== Token Service Settings
+==== Token service settings
 
 `xpack.security.authc.token.enabled`::
 Set to `false` to disable the built-in token service. Defaults to `true` unless
@@ -102,7 +102,7 @@ The length of time that a token is valid for. By default this value is `20m` or
 
 [float]
 [[realm-settings]]
-==== Realm Settings
+==== Realm settings
 
 You configure realm settings in the `xpack.security.authc.realms`
 namespace in `elasticsearch.yml`. For example:
@@ -129,10 +129,11 @@ xpack.security.authc.realms:
 ----------------------------------------
 
 The valid settings vary depending on the realm type. For more
-information, see {xpack-ref}/setting-up-authentication.html[Setting Up Authentication].
+information, see {xpack-ref}/setting-up-authentication.html[Setting up authentication].
 
 [float]
-===== Settings Valid for All Realms
+[[ref-realm-settings]]
+===== Settings valid for all realms
 
 `type`::
 The type of the realm: `native`, `ldap`, `active_directory`, `pki`, or `file`. Required.
@@ -146,15 +147,40 @@ recommended when you configure multiple realms. Defaults to `Integer.MAX_VALUE`.
 Indicates whether a realm is enabled. You can use this setting to disable a
 realm without removing its configuration information. Defaults to `true`.
 
+[[ref-native-settings]]
+[float]
+===== Native realm settings
+
+For a native realm, the `type` must be set to `native`. In addition to the 
+<>, you can specify  
+the following optional settings: 
+
+`cache.ttl`:: The time-to-live for cached user entries. A user and a hash of its 
+credentials are cached for this period of time. Specify the time period using 
+the standard {es} <>. Defaults to `20m`.
+
+`cache.max_users`:: The maximum number of user entries that can live in the 
+cache at any given time. Defaults to 100,000.
+
+`cache.hash_algo`:: (Expert Setting) The hashing algorithm that is used for the 
+in-memory cached user credentials. For possible values, see 
+{xpack-ref}/controlling-user-cache.html[Cache hash algorithms]. Defaults to 
+`ssha256`.
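+
+For example (a sketch; the realm name `native1` is hypothetical), a native 
+realm with an explicit cache configuration might look like this:
+
+[source,yaml]
+----------------------------------------
+xpack.security.authc.realms:
+  native1:
+    type: native
+    order: 0
+    cache.ttl: 10m
+    cache.max_users: 50000
+----------------------------------------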
+
+
 [[ref-users-settings]]
 
 [float]
-===== File Realm Settings
+===== File realm settings
+
+The `type` setting must be set to `file`. In addition to the 
+<>, you can specify  
+the following settings: 
 
 `cache.ttl`::
-The time-to-live for cached user entries--user credentials are cached for
-this configured period of time. Defaults to `20m`. Specify values using the
-standard Elasticsearch {ref}/common-options.html#time-units[time units].
+The time-to-live for cached user entries. A user and a hash of its credentials 
+are cached for this configured period of time. Specify values using the 
+standard {es} {ref}/common-options.html#time-units[time units].
 Defaults to `20m`.
 
 `cache.max_users`::
@@ -168,14 +194,19 @@ all possible values. Defaults to `ssha256`.
 
 [[ref-ldap-settings]]
 [float]
-===== LDAP Realm Settings
+===== LDAP realm settings
 
-`url`::
-An LDAP URL in the format `ldap[s]://<server>:<port>`. Required.
+The `type` setting must be set to `ldap`. In addition to the
+<<ref-realm-settings,settings that are valid for all realms>>, you can specify the following settings:
+
+`url`:: Specifies one or more LDAP URLs in the format
+`ldap[s]://<server>:<port>`. Multiple URLs can be defined using a
+comma-separated value or array syntax: `[ "ldaps://server1:636", "ldaps://server2:636" ]`.
+`ldaps` and `ldap` URL protocols cannot be mixed in the same realm. Required.
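+
+For example, a sketch of a realm that lists two servers (the realm name and
+hostnames are illustrative):
+
+[source,yaml]
+----------------------------------------
+xpack.security.authc.realms:
+  ldap1:
+    type: ldap
+    order: 1
+    url: "ldaps://server1:636,ldaps://server2:636"
+----------------------------------------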
 
 `load_balance.type`::
 The behavior to use when there are multiple LDAP URLs defined. For supported
-values see {xpack-ref}/ldap-realm.html#ldap-load-balancing[LDAP load balancing and failover types].
+values see <<load-balancing,load balancing and failover types>>.
 Defaults to `failover`.
 
 `load_balance.cache_ttl`::
@@ -184,36 +215,45 @@ this setting controls the amount of time to cache DNS lookups. Defaults
 to `1h`.
 
 `bind_dn`::
-The DN of the user that will be used to bind to the LDAP and perform searches.
-Only applicable in {xpack-ref}/ldap-realm.html#ldap-user-search[user search mode].
-If this is not specified, an anonymous bind will be attempted.
-Defaults to Empty.
+The DN of the user that is used to bind to the LDAP directory and perform searches.
+Only applicable in user search mode.
+If not specified, an anonymous bind is attempted.
+Defaults to Empty. Due to its potential security impact, `bind_dn` is not 
+exposed via the <<cluster-nodes-info,nodes info API>>.
 
 `bind_password`::
-The password for the user that will be used to bind to the LDAP directory.
-Defaults to Empty.
-*Deprecated.* Use `secure_bind_password` instead.
+deprecated[6.3] Use `secure_bind_password` instead. The password for the user 
+that is used to bind to the LDAP directory.
+Defaults to Empty. Due to its potential security impact, `bind_password` is not 
+exposed via the <<cluster-nodes-info,nodes info API>>.
+
 
 `secure_bind_password` (<<secure-settings,Secure>>)::
-The password for the user that will be used to bind to the LDAP directory.
+The password for the user that is used to bind to the LDAP directory.
 Defaults to Empty.
 
 `user_dn_templates`::
 The DN template that replaces the user name with the string `{0}`.
-This element is multivalued; you can specify multiple user contexts.
-Required to operate in user template mode. Not valid
-if `user_search.base_dn` is specified. For more information on
+This setting is multivalued; you can specify multiple user contexts.
+Required to operate in user template mode. If `user_search.base_dn` is specified, 
+this setting is not valid. For more information on
 the different modes, see {xpack-ref}/ldap-realm.html[LDAP realms].
++
+--
+NOTE: If any settings starting with `user_search` are specified, the 
+`user_dn_templates` setting is ignored.
+
+--
 
 `user_group_attribute`::
 Specifies the attribute to examine on the user for group membership.
-The default is `memberOf`. This setting will be ignored if any
-`group_search` settings are specified. Defaults to  `memberOf`.
+If any `group_search` settings are specified, this setting is ignored. Defaults 
+to `memberOf`.
 
 `user_search.base_dn`::
 Specifies a container DN to search for users. Required
-to operated in user search mode. Not valid if
-`user_dn_templates is specified. For more information on
+to operate in user search mode. If `user_search.base_dn` is specified, this
+setting is not valid. For more information on
 the different modes, see {xpack-ref}/ldap-realm.html[LDAP realms].
 
 `user_search.scope`::
@@ -224,18 +264,18 @@ The scope of the user search. Valid values are `sub_tree`, `one_level` or
 the only user considered. Defaults to  `sub_tree`.
 
 `user_search.filter`::
-Specifies the filter used to search the directory in attempt to match
+Specifies the filter used to search the directory in an attempt to match
 an entry with the username provided by the user. Defaults to `(uid={0})`.
 `{0}` is substituted with the username provided when searching.
 
 `user_search.attribute`::
-This setting is deprecated; use `user_search.filter` instead.
-The attribute to match with the username presented to. Defaults to `uid`.
+deprecated[5.6] Use `user_search.filter` instead.
+The attribute to match with the username sent with the request. Defaults to `uid`.
 
 `user_search.pool.enabled`::
-Enables or disables connection pooling for user search. When
-disabled a new connection is created for every search. The
-default is `true` when `bind_dn` is provided.
+Enables or disables connection pooling for user search. If set to `false`, a new 
+connection is created for every search. The
+default is `true` when `bind_dn` is set.
 
 `user_search.pool.size`::
 The maximum number of connections to the LDAP server to allow in the
@@ -243,17 +283,18 @@ connection pool. Defaults to `20`.
 
 `user_search.pool.initial_size`::
 The initial number of connections to create to the LDAP server on startup.
-Defaults to `0`.
+Defaults to `0`. If the LDAP server is down, values greater than `0` could cause 
+startup failures.
 
 `user_search.pool.health_check.enabled`::
-Flag to enable or disable a health check on LDAP connections in the connection
+Enables or disables a health check on LDAP connections in the connection
 pool. Connections are checked in the background at the specified interval.
 Defaults to `true`.
 
 `user_search.pool.health_check.dn`::
-The distinguished name to be retrieved as part of the health check.
-Defaults to the value of `bind_dn` if present, and if
-not falls back to `user_search.base_dn`.
+The distinguished name that is retrieved as part of the health check.
+Defaults to the value of `bind_dn` if present; if
+not, falls back to `user_search.base_dn`.
 
 `user_search.pool.health_check.interval`::
 The interval to perform background checks of connections in the pool.
@@ -261,7 +302,7 @@ Defaults to `60s`.
 
 `group_search.base_dn`::
 The container DN to search for groups in which the user has membership. When
-this element is absent, Security searches for the attribute specified by
+this element is absent, {security} searches for the attribute specified by
 `user_group_attribute` set on the user in order to determine group membership.
 
 `group_search.scope`::
@@ -271,30 +312,33 @@ Specifies whether the group search should be `sub_tree`, `one_level` or
 `base` specifies that the `base_dn` is a group object, and that it is the
 only group considered. Defaults to  `sub_tree`.
 
-`group_search.filter`::
+`group_search.filter`:: 
+Specifies a filter to use to look up a group. 
 When not set, the realm searches for `group`, `groupOfNames`, `groupOfUniqueNames`,
 or `posixGroup` with the attributes `member`, `memberOf`, or `memberUid`.  Any
 instance of `{0}` in the filter is replaced by the user attribute defined in
 `group_search.user_attribute`.
 
 `group_search.user_attribute`::
-Specifies the user attribute that will be fetched and provided as a parameter to
+Specifies the user attribute that is fetched and provided as a parameter to
 the filter.  If not set, the user DN is passed into the filter. Defaults to Empty.
 
 `unmapped_groups_as_roles`::
-Takes a boolean variable. When this element is set to `true`, the names of any
-LDAP groups that are not referenced in a role-mapping _file_ are used as role
-names and assigned to the user. Defaults to `false`.
+If set to `true`, the names of any unmapped LDAP groups are used as role names 
+and assigned to the user. A group is considered to be _unmapped_ if it is not 
+referenced in a
+{xpack-ref}/mapping-roles.html#mapping-roles-file[role-mapping file]. API-based 
+role mappings are not considered. Defaults to `false`.
 
 `files.role_mapping`::
 The {xpack-ref}/security-files.html[location] for the {xpack-ref}/mapping-roles.html#mapping-roles[
 YAML role mapping configuration file]. Defaults to
-`CONFIG_DIR/x-pack/role_mapping.yml`.
+`CONFIG_DIR/role_mapping.yml`.
 
 `follow_referrals`::
-Boolean value that specifies whether Securityshould follow referrals returned
+Specifies whether {security} should follow referrals returned
 by the LDAP server. Referrals are URLs returned by the server that are to be
-used to continue the LDAP operation (e.g. search). Defaults to `true`.
+used to continue the LDAP operation (for example, search). Defaults to `true`.
 
 `metadata`::
 A list of additional LDAP attributes that should be loaded from the
@@ -316,7 +360,9 @@ An `s` at the end indicates seconds, or `ms` indicates milliseconds.
 Defaults to `5s` (5 seconds ).
 
 `ssl.key`::
-Path to a PEM encoded file containing the private key.
+Path to a PEM encoded file containing the private key, which is used if the 
+LDAP server requires client authentication. `ssl.key` and `ssl.keystore.path` 
+cannot be used at the same time.
 
 `ssl.key_passphrase`::
 The passphrase that is used to decrypt the private key. This value is
@@ -330,7 +376,9 @@ Path to a PEM encoded file containing the certificate (or certificate chain)
 that will be presented to clients when they connect.
 
 `ssl.certificate_authorities`::
-List of paths to PEM encoded certificate files that should be trusted.
+List of paths to PEM encoded certificate files that should be trusted. 
+`ssl.certificate_authorities` and `ssl.truststore.path` cannot be used at the 
+same time.
 
 `ssl.keystore.path`::
 The path to the Java Keystore file that contains a private key and certificate.
@@ -354,7 +402,7 @@ The password for the key in the keystore. Defaults to the keystore password.
 
 `ssl.truststore.path`::
 The path to the Java Keystore file that contains the certificates to trust.
-`ssl.certificate_authorities` and `ssl.truststore.path` may not be used at the same time.
+`ssl.certificate_authorities` and `ssl.truststore.path` cannot be used at the same time.
 
 `ssl.truststore.password`::
 The password to the truststore.
@@ -375,18 +423,19 @@ See <> for an explanation of
 these values.
 
 `ssl.supported_protocols`::
-Supported protocols with versions. Defaults to the value of
+Supported protocols for TLS/SSL (with versions). Defaults to the value of
 `xpack.ssl.supported_protocols`.
 
-`ssl.cipher_suites`
+`ssl.cipher_suites`:: Specifies the cipher suites that should be supported when 
+communicating with the LDAP server. 
 Supported cipher suites can be found in Oracle's http://docs.oracle.com/javase/8/docs/technotes/guides/security/SunProviders.html[
 Java Cryptography Architecture documentation]. Defaults to the value of
 `xpack.ssl.cipher_suites`.
 
 `cache.ttl`::
-Specifies the time-to-live for cached user entries (a user and its credentials
-are cached for this period of time). Use the standard Elasticsearch
-{ref}/common-options.html#time-units[time units]). Defaults to  `20m`.
+Specifies the time-to-live for cached user entries. A user and a hash of its 
+credentials are cached for this period of time. Use the standard {es}
+<<time-units,time units>>. Defaults to `20m`.
 
 `cache.max_users`::
 Specifies the maximum number of user entries that the cache can contain.
@@ -394,20 +443,28 @@ Defaults to `100000`.
 
 `cache.hash_algo`::
 (Expert Setting) Specifies the hashing algorithm that is used for the
-in-memory cached user credentials (see {xpack-ref}/controlling-user-cache.html#controlling-user-cache[Cache hash algorithms]
-table for all possible values). Defaults to `ssha256`.
+in-memory cached user credentials. See {xpack-ref}/controlling-user-cache.html#controlling-user-cache[Cache hash algorithms]
+table for all possible values. Defaults to `ssha256`.
 
 [[ref-ad-settings]]
 [float]
-===== Active Directory Realm Settings
+===== Active Directory realm settings
+
+The `type` setting must be set to `active_directory`. In addition to the
+<<ref-realm-settings,settings that are valid for all realms>>, you can specify
+the following settings:
 
 `url`::
-A URL in the format `ldap[s]://<domain_name>:<port>`. Defaults to `ldap://<domain_name>:389`.
+An LDAP URL of the form `ldap[s]://<domain_name>:<port>`. {security} attempts to
+authenticate against this URL. If the URL is not specified, it is derived from
+the `domain_name` setting and assumes an unencrypted connection to port 389.
+Defaults to `ldap://<domain_name>:389`. This setting is required when connecting
+using SSL/TLS or when using a custom port.
 
 `load_balance.type`::
 The behavior to use when there are multiple LDAP URLs defined. For supported
-values see {xpack-ref}/active-directory-realm.html#ad-load-balancing[load balancing and failover types].
-Defaults to  `failover`.
+values see <<load-balancing,load balancing and failover types>>.
+Defaults to `failover`.
 
 `load_balance.cache_ttl`::
 When using `dns_failover` or `dns_round_robin` as the load balancing type,
@@ -415,31 +472,34 @@ this setting controls the amount of time to cache DNS lookups. Defaults
 to `1h`.
 
 `domain_name`::
-The domain name of Active Directory. The cluster can derive the URL and
-`user_search_dn` fields from values in this element if those fields are not
-otherwise specified. Required.
+The domain name of Active Directory. If the `url` and `user_search_dn`
+settings are not specified, the cluster can derive those values from this 
+setting. Required.
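+
+For example, a sketch of a minimal realm that relies on `domain_name` to derive
+the URL and user search DN (the realm name and domain are illustrative):
+
+[source,yaml]
+----------------------------------------
+xpack.security.authc.realms:
+  active_directory1:
+    type: active_directory
+    order: 0
+    domain_name: ad.example.com
+----------------------------------------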
 
 `bind_dn`::
-The DN of the user that will be used to bind to Active Directory and perform searches.
-Defaults to Empty.
+The DN of the user that is used to bind to Active Directory and perform searches.
+Defaults to Empty. Due to its potential security impact, `bind_dn` is not 
+exposed via the <<cluster-nodes-info,nodes info API>>.
 
 `bind_password`::
-The password for the user that will be used to bind to Active Directory.
-Defaults to Empty.
-*Deprecated.* Use `secure_bind_password` instead.
+deprecated[6.3] Use `secure_bind_password` instead. The password for the user 
+that is used to bind to Active Directory. Defaults to Empty. Due to its 
+potential security impact, `bind_password` is not exposed via the
+<<cluster-nodes-info,nodes info API>>.
 
 `secure_bind_password` (<<secure-settings,Secure>>)::
-The password for the user that will be used to bind to Active Directory.
+The password for the user that is used to bind to Active Directory.
 Defaults to Empty.
 
 `unmapped_groups_as_roles`::
-Takes a boolean variable. When this element is set to `true`, the names of any
-LDAP groups that are not referenced in a role-mapping _file_ are used as role
-names and assigned to the user. Defaults to `false`.
+If set to `true`, the names of any unmapped Active Directory groups are used as 
+role names and assigned to the user. A group is considered _unmapped_ when it 
+is not referenced in any role-mapping files. API-based role mappings are not 
+considered. Defaults to `false`.
 
 `files.role_mapping`::
 The {xpack-ref}/security-files.html[location] for the YAML
-role mapping configuration file. Defaults to  `CONFIG_DIR/x-pack/role_mapping.yml`.
+role mapping configuration file. Defaults to `CONFIG_DIR/role_mapping.yml`.
 
 `user_search.base_dn`::
 The context to search for a user. Defaults to the root
@@ -455,22 +515,27 @@ only user considered. Defaults to `sub_tree`.
 `user_search.filter`::
 Specifies a filter to use to lookup a user given a username.  The default
 filter looks up `user` objects with either `sAMAccountName` or
-`userPrincipalName`.
+`userPrincipalName`. If specified, this must be a valid LDAP user search filter. 
+For example, `(&(objectClass=user)(sAMAccountName={0}))`. For more information,
+see 
+https://msdn.microsoft.com/en-us/library/aa746475(v=vs.85).aspx[Search Filter Syntax].
 
 `user_search.upn_filter`::
 Specifies a filter to use to lookup a user given a user principal name.
 The default filter looks up `user` objects with
 a matching `userPrincipalName`. If specified, this
-must be a valid LDAP user search filter, for example
+must be a valid LDAP user search filter. For example,
 `(&(objectClass=user)(userPrincipalName={1}))`. `{1}` is the full user principal name
-provided by the user.
+provided by the user. For more information, see 
+https://msdn.microsoft.com/en-us/library/aa746475(v=vs.85).aspx[Search Filter Syntax].
 
 `user_search.down_level_filter`::
 Specifies a filter to use to lookup a user given a down level logon name
 (DOMAIN\user). The default filter looks up `user` objects with a matching
 `sAMAccountName` in the domain provided. If specified, this
-must be a valid LDAP user search filter, for example
-`(&(objectClass=user)(sAMAccountName={0}))`.
+must be a valid LDAP user search filter. For example,
+`(&(objectClass=user)(sAMAccountName={0}))`. For more information, see 
+https://msdn.microsoft.com/en-us/library/aa746475(v=vs.85).aspx[Search Filter Syntax]. 
 
 `user_search.pool.enabled`::
 Enables or disables connection pooling for user search. When
@@ -483,16 +548,18 @@ connection pool. Defaults to `20`.
 
 `user_search.pool.initial_size`::
 The initial number of connections to create to the Active Directory server on startup.
-Defaults to `0`.
+Defaults to `0`. If the LDAP server is down, values greater than `0`
+could cause startup failures. 
 
 `user_search.pool.health_check.enabled`::
-Flag to enable or disable a health check on Active Directory connections in the connection
+Enables or disables a health check on Active Directory connections in the connection
 pool. Connections are checked in the background at the specified interval.
 Defaults to `true`.
 
 `user_search.pool.health_check.dn`::
 The distinguished name to be retrieved as part of the health check.
-Defaults to the value of `bind_dn` if it is a distinguished name.
+Defaults to the value of `bind_dn` if that setting is present. Otherwise, it 
+defaults to the value of the `user_search.base_dn` setting. 
 
 `user_search.pool.health_check.interval`::
 The interval to perform background checks of connections in the pool.
@@ -500,7 +567,7 @@ Defaults to `60s`.
 
 `group_search.base_dn`::
 The context to search for groups in which the user has membership.  Defaults
-to the root of the  Active Directory domain.
+to the root of the Active Directory domain.
 
 `group_search.scope`::
 Specifies whether the group search should be `sub_tree`, `one_level` or
@@ -530,13 +597,18 @@ Defaults to `5s` (5 seconds ).
 
 `ssl.certificate`::
 Path to a PEM encoded file containing the certificate (or certificate chain)
-that will be presented to clients when they connect.
+that will be presented to clients when they connect. 
 
 `ssl.certificate_authorities`::
-List of paths to PEM encoded certificate files that should be trusted.
+List of paths to PEM encoded certificate files that should be trusted. 
+`ssl.certificate_authorities` and `ssl.truststore.path` cannot be used at the 
+same time.
 
 `ssl.key`::
-Path to the PEM encoded file containing the private key.
+Path to the PEM encoded file containing the private key, which is used when the 
+Active Directory server requires client authentication. `ssl.key` and 
+`ssl.keystore.path` cannot be used at the same time.
+
 
 `ssl.key_passphrase`::
 The passphrase that is used to decrypt the private key. This value is
@@ -560,6 +632,7 @@ The password to the keystore.
 
 `ssl.keystore.path`::
 The path to the Java Keystore file that contains a private key and certificate.
+`ssl.key` and `ssl.keystore.path` cannot be used at the same time.
 
 `ssl.keystore.type`::
 The format of the keystore file. Should be either `jks` to use the Java
@@ -573,6 +646,8 @@ The password to the truststore.
 
 `ssl.truststore.path`::
 The path to the Java Keystore file that contains the certificates to trust.
+`ssl.certificate_authorities` and `ssl.truststore.path` cannot be used at the 
+same time.
 
 `ssl.truststore.type`::
 The format of the truststore file. Should be either `jks` to use the Java
@@ -587,17 +662,18 @@ See <> for an explanation of
 these values.
 
 `ssl.supported_protocols`::
-Supported protocols with versions. Defaults to the value of
+Supported protocols for TLS/SSL (with versions). Defaults to the value of
 `xpack.ssl.supported_protocols`.
 
-`ssl.cipher_suites`::
+`ssl.cipher_suites`:: Specifies the cipher suites that should be supported when 
+communicating with the Active Directory server. 
 Supported cipher suites can be found in Oracle's http://docs.oracle.com/javase/8/docs/technotes/guides/security/SunProviders.html[
 Java Cryptography Architecture documentation]. Defaults to the value of
 `xpack.ssl.cipher_suites`.
 
 `cache.ttl`::
-Specifies the time-to-live for cached user entries (user
-credentials are cached for this configured period of time). Use the
+Specifies the time-to-live for cached user entries. A user and a hash of its 
+credentials are cached for this configured period of time. Use the
 standard Elasticsearch {ref}/common-options.html#time-units[time units]).
 Defaults to `20m`.
 
@@ -606,12 +682,21 @@ Specifies the maximum number of user entries that the cache can contain.
 Defaults to `100000`.
 
 `cache.hash_algo`::
-(Expert Setting) Specifies the hashing algorithm that will be used for
+(Expert Setting) Specifies the hashing algorithm that is used for
 the in-memory cached user credentials (see {xpack-ref}/controlling-user-cache.html#controlling-user-cache[Cache hash algorithms] table for all possible values). Defaults to `ssha256`.
 
+`follow_referrals`::
+If set to `true`, {security} follows referrals returned by the LDAP server.
+Referrals are URLs returned by the server that are to be used to continue the 
+LDAP operation (such as `search`). Defaults to `true`.
+
 [[ref-pki-settings]]
 [float]
-===== PKI Realm Settings
+===== PKI realm settings
+
+The `type` setting must be set to `pki`. In addition to the
+<<ref-realm-settings,settings that are valid for all realms>>, you can specify
+the following settings:
 
 `username_pattern`::
 The regular expression pattern used to extract the username from the
@@ -621,9 +706,7 @@ Defaults to `CN=(.*?)(?:,\|$)`.
 `certificate_authorities`::
 List of paths to the PEM certificate files that should be used to authenticate a
 user's certificate as trusted. Defaults to the trusted certificates configured
-for SSL. See the {xpack-ref}/pki-realm.html#pki-ssl-config[SSL settings]
-section of the PKI realm documentation for more information.
-This setting cannot be used with `truststore.path`.
+for SSL. This setting cannot be used with `truststore.path`. 
 
 `truststore.algorithm`::
 Algorithm for the truststore. Defaults to `SunX509`.
@@ -636,19 +719,17 @@ The password for the truststore.
 
 `truststore.path`::
 The path of a truststore to use. Defaults to the trusted certificates configured
-for SSL. See the
-{xpack-ref}/pki-realm.html#pki-ssl-config[SSL settings] section of the PKI realm
-documentation for more information. This setting cannot be used with
-`certificate_authorities`.
+for SSL. This setting cannot be used with `certificate_authorities`.
 
 `files.role_mapping`::
 Specifies the {xpack-ref}/security-files.html[location] of the
 {xpack-ref}/mapping-roles.html[YAML role  mapping configuration file].
-Defaults to `CONFIG_DIR/x-pack/role_mapping.yml`.
+Defaults to `CONFIG_DIR/role_mapping.yml`.
 
 `cache.ttl`::
-Specifies the time-to-live for cached user entries. Use the
-standard Elasticsearch {ref}/common-options.html#time-units[time units]).
+Specifies the time-to-live for cached user entries. A user and a hash of its 
+credentials are cached for this period of time. Use the
+standard {es} {ref}/common-options.html#time-units[time units].
 Defaults to `20m`.
 
 `cache.max_users`::
@@ -657,10 +738,17 @@ Defaults to `100000`.
 
 [[ref-saml-settings]]
 [float]
-===== SAML Realm Settings
+===== SAML realm settings
+
+The `type` setting must be set to `saml`. In addition to the
+<<ref-realm-settings,settings that are valid for all realms>>, you can specify
+the following settings:
 
 `idp.entity_id`::
-The Entity ID of the SAML Identity Provider
+The Entity ID of the SAML Identity Provider. An Entity ID is a URI with a 
+maximum length of 1024 characters. It can be a URL (`https://idp.example.com/`) or
+a URN (`urn:example.com:idp`) and can be found in the configuration or the SAML
+metadata of the Identity Provider.
 
 `idp.metadata.path`::
 The path _(recommended)_ or URL to a SAML 2.0 metadata file describing the
@@ -668,7 +756,7 @@ capabilities and configuration of the Identity Provider.
 If a path is provided, then it is resolved relative to the {es} config
 directory.
 If a URL is provided, then it must be either a `file` URL or a `https` URL.
-{security} will automatically poll this metadata resource and will reload
+{security} automatically polls this metadata resource and reloads 
 the IdP configuration when changes are detected.
 File based resources are polled at a frequency determined by the global {es}
 `resource.reload.interval.high` setting, which defaults to 5 seconds.
@@ -685,39 +773,47 @@ Indicates whether to utilise the Identity Provider's Single Logout service
 Defaults to `true`.
 
 `sp.entity_id`::
-The Entity ID to use for this SAML Service Provider, entered as a URI.
+The Entity ID to use for this SAML Service Provider. This should be entered as a
+URI. We recommend that you use the base URL of your {kib} instance. For example,
+`https://kibana.example.com/`.
 
 `sp.acs`::
-The URL of the Assertion Consumer Service within {kib}.
+The URL of the Assertion Consumer Service within {kib}. Typically this is the
+`api/security/v1/saml` endpoint of your {kib} server. For example,
+`https://kibana.example.com/api/security/v1/saml`.
 
 `sp.logout`::
-The URL of the Single Logout service within {kib}.
+The URL of the Single Logout service within {kib}. Typically this is the
+`logout` endpoint of your {kib} server. For example,
+`https://kibana.example.com/logout`.
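+
+For example, a sketch of the three `sp.*` settings for a {kib} instance served
+at `https://kibana.example.com/` (the realm name and host are illustrative):
+
+[source,yaml]
+----------------------------------------
+xpack.security.authc.realms:
+  saml1:
+    type: saml
+    order: 2
+    sp.entity_id: "https://kibana.example.com/"
+    sp.acs: "https://kibana.example.com/api/security/v1/saml"
+    sp.logout: "https://kibana.example.com/logout"
+----------------------------------------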
 
 `attributes.principal`::
 The Name of the SAML attribute that should be used as the {security} user's
-principal (username)
+principal (username).
 
 `attributes.groups`::
 The Name of the SAML attribute that should be used to populate {security}
-user's groups
+user's groups. 
 
 `attributes.name`::
 The Name of the SAML attribute that should be used to populate {security}
-user's full name
+user's full name. 
 
 `attributes.mail`::
 The Name of the SAML attribute that should be used to populate {security}
-user's email address
+user's email address. 
 
 `attributes.dn`::
 The Name of the SAML attribute that should be used to populate {security}
-user's X.500 _Distinguished Name_
+user's X.500 _Distinguished Name_. 
 
 `attribute_patterns.principal`::
-A java regular expression that is matched against the SAML attribute specified
+A Java regular expression that is matched against the SAML attribute specified
 by `attributes.principal` before it is applied to the user's _principal_ property.
-The attribute value must match the pattern, and the value of the first
-_capturing group_ is used as the principal.
+The attribute value must match the pattern, and the value of the first
+_capturing group_ is used as the principal. For example, `^([^@]+)@example\\.com$` 
+matches email addresses from the "example.com" domain and uses the local-part as 
+the principal.
 
 `attribute_patterns.groups`::
 As per `attribute_patterns.principal`, but for the _group_ property.
@@ -733,26 +829,41 @@ As per `attribute_patterns.principal`, but for the _dn_ property.
 
 `nameid_format`::
 The NameID format that should be requested when asking the IdP to authenticate
-the current user.
-Defaults to `urn:oasis:names:tc:SAML:2.0:nameid-format:transient`
+the current user. Defaults to requesting _transient_ names 
+(`urn:oasis:names:tc:SAML:2.0:nameid-format:transient`). 
+
+`nameid.allow_create`:: The value of the `AllowCreate` attribute of the 
+`NameIdPolicy` element in an authentication request. Defaults to `false`. 
+
+`nameid.sp_qualifier`:: The value of the `SPNameQualifier` attribute of the 
+`NameIdPolicy` element in an authentication request. The default is to not 
+include the `SPNameQualifier` attribute.
 
 `force_authn`::
-Whether to set the `ForceAuthn` attribute when requesting that the IdP
-authenticate the current user.
+Specifies whether to set the `ForceAuthn` attribute when requesting that the IdP
+authenticate the current user. If set to `true`, the IdP is required to verify 
+the user's identity, irrespective of any existing sessions they might have.
 Defaults to `false`.
 
 `populate_user_metadata`::
-Whether to populate the {es} user's metadata with the values that are provided
-by the SAML attributes.
-Defaults to `true`.
+Specifies whether to populate the {es} user's metadata with the values that are 
+provided by the SAML attributes. Defaults to `true`.
 
 `allowed_clock_skew`::
 The maximum amount of skew that can be tolerated between the IdP's clock and the
 {es} node's clock.
 Defaults to `3m` (3 minutes).
 
+[float]
+[[ref-saml-signing-settings]]
+===== SAML realm signing settings
+
+If a signing key is configured (that is, either `signing.key` or 
+`signing.keystore.path` is set), then {security} signs outgoing SAML messages. 
+Signing can be configured using the following settings:
+
 `signing.saml_messages`::
-A list of SAML message types that should be signed, or `*` to sign all messages.
+A list of SAML message types that should be signed or `*` to sign all messages.
 Each element in the list should be the local name of a SAML XML Element.
 Supported element types are `AuthnRequest`, `LogoutRequest` and `LogoutResponse`.
 Only valid if `signing.key` or `signing.keystore.path` is also specified.
@@ -760,152 +871,177 @@ Defaults to `*`.
 
 `signing.key`::
 Specifies the path to the PEM encoded private key to use for SAML message signing.
-`signing.key` and `signing.keystore.path` may not be used at the same time.
+`signing.key` and `signing.keystore.path` cannot be used at the same time.
 
 `signing.secure_key_passphrase` (<<secure-settings,Secure>>)::
 Specifies the passphrase to decrypt the PEM encoded private key (`signing.key`)
 if it is encrypted.
 
 `signing.certificate`::
-Specifies the path to the PEM encoded certificate that corresponds to the
-`signing.key`.  May only be used if `signing.key` is set.
+Specifies the path to the PEM encoded certificate (or certificate chain) that 
+corresponds to the `signing.key`. This certificate must also be included in the 
+Service Provider metadata or manually configured within the IdP to allow for 
+signature validation. This setting can only be used if `signing.key` is set.
 
 `signing.keystore.path`::
 The path to the keystore that contains a private key and certificate.
 Must be either a Java Keystore (jks) or a PKCS#12 file.
-`signing.key` and `signing.keystore.path` may not be used at the same time.
+`signing.key` and `signing.keystore.path` cannot be used at the same time.
 
 `signing.keystore.type`::
-The type of the keystore (`signing.keystore.path`).
-Must be one of "jks" or "PKCS12".  Defaults to "PKCS12" if the keystore path
-ends in ".p12", ".pfx" or "pkcs12", otherwise uses "jks".
+The type of the keystore in `signing.keystore.path`.
+Must be either `jks` or `PKCS12`. If the keystore path ends in ".p12", ".pfx", 
+or "pkcs12", this setting defaults to `PKCS12`. Otherwise, it defaults to `jks`.
 
 `signing.keystore.alias`::
 Specifies the alias of the key within the keystore that should be
-used for SAML message signing. Must be specified if the keystore
-contains more than one private key.
+used for SAML message signing. If the keystore contains more than one private 
+key, this setting must be specified. 
 
 `signing.keystore.secure_password` (<<secure-settings,Secure>>)::
-The password to the keystore (`signing.keystore.path`).
+The password to the keystore in `signing.keystore.path`.
 
 `signing.keystore.secure_key_password` (<<secure-settings,Secure>>)::
 The password for the key in the keystore (`signing.keystore.path`).
 Defaults to the keystore password.
 
+[float]
+[[ref-saml-encryption-settings]]
+===== SAML realm encryption settings
+
+If an encryption key is configured (that is, either `encryption.key` or 
+`encryption.keystore.path` is set), then {security} publishes an encryption 
+certificate when generating metadata and attempts to decrypt incoming SAML 
+content. Encryption can be configured using the following settings:
+
 `encryption.key`::
 Specifies the path to the PEM encoded private key to use for SAML message
 decryption.
-`encryption.key` and `encryption.keystore.path` may not be used at the same time.
+`encryption.key` and `encryption.keystore.path` cannot be used at the same time.
 
 `encryption.secure_key_passphrase` (<<secure-settings,Secure>>)::
 Specifies the passphrase to decrypt the PEM encoded private key
 (`encryption.key`) if it is encrypted.
 
 `encryption.certificate`::
-Specifies the path to the PEM encoded certificate chain that is associated with
-the `encryption.key`.  May only be used if `encryption.key` is set.
+Specifies the path to the PEM encoded certificate (or certificate chain) that is 
+associated with the `encryption.key`. This certificate must also be included in 
+the Service Provider metadata or manually configured within the IdP to enable 
+message encryption. This setting can be used only if `encryption.key` is set.
 
 `encryption.keystore.path`::
 The path to the keystore that contains a private key and certificate.
 Must be either a Java Keystore (jks) or a PKCS#12 file.
-`encryption.key` and `encryption.keystore.path` may not be used at the same time.
+`encryption.key` and `encryption.keystore.path` cannot be used at the same time.
 
 `encryption.keystore.type`::
 The type of the keystore (`encryption.keystore.path`).
-Must be one of "jks" or "PKCS12".  Defaults to "PKCS12" if the keystore path
-ends in ".p12", ".pfx" or "pkcs12", otherwise uses "jks".
+Must be either `jks` or `PKCS12`. If the keystore path ends in ".p12", ".pfx", 
+or "pkcs12", this setting defaults to `PKCS12`. Otherwise, it defaults to `jks`.
 
 `encryption.keystore.alias`::
 Specifies the alias of the key within the keystore (`encryption.keystore.path`)
 that should be used for SAML message decryption. If not specified, all compatible
-key pairs from the keystore will be considered as candidate keys for decryption.
+key pairs from the keystore are considered as candidate keys for decryption.
 
 `encryption.keystore.secure_password` (<<secure-settings,Secure>>)::
 The password to the keystore (`encryption.keystore.path`).
 
 `encryption.keystore.secure_key_password` (<<secure-settings,Secure>>)::
 The password for the key in the keystore (`encryption.keystore.path`). Only a
-single password is supported. If you are using multiple decryption keys, then
+single password is supported. If you are using multiple decryption keys, 
 they cannot have individual passwords.
 
+[float]
+[[ref-saml-ssl-settings]]
+===== SAML realm SSL settings
+
+If you are loading the IdP metadata over SSL/TLS (that is, `idp.metadata.path` 
+is a URL using the `https` protocol), the following settings can be used to 
+configure SSL. If these are not specified, then the 
+<<ssl-tls-settings,default SSL settings>> are used.
+
+NOTE: These settings are not used for any purpose other than loading metadata 
+over https.
+
 `ssl.key`::
-If retrieving IDP metadata via https (see `idp.metadata.path`), specifies the
-path to the PEM encoded private key to use for http client authentication (if
-required). `ssl.key` and `ssl.keystore.path` may not be used at the same time.
+Specifies the path to the PEM encoded private key to use for http client 
+authentication (if required). `ssl.key` and `ssl.keystore.path` cannot be used 
+at the same time.
 
 `ssl.key_passphrase`::
-If retrieving IDP metadata via https (see `idp.metadata.path`), specifies the
+Specifies the
 passphrase to decrypt the PEM encoded private key (`ssl.key`) if it is
-encrypted. May not be used with `ssl.secure_key_passphrase`
+encrypted. Cannot be used with `ssl.secure_key_passphrase`. 
 
 `ssl.secure_key_passphrase` (<<secure-settings,Secure>>)::
-If retrieving IDP metadata via https (see `idp.metadata.path`), specifies the
+Specifies the
 passphrase to decrypt the PEM encoded private key (`ssl.key`) if it is
-encrypted. May not be used with `ssl.key_passphrase`
+encrypted. Cannot be used with `ssl.key_passphrase`. 
 
 `ssl.certificate`::
-If retrieving IDP metadata via https (see `idp.metadata.path`), specifies the
+Specifies the
 path to the PEM encoded certificate (or certificate chain) that is associated
-with the key (`ssl.key`). May only be used if `ssl.key` is set.
+with the key (`ssl.key`). This setting can be used only if `ssl.key` is set.
 
 `ssl.certificate_authorities`::
-If retrieving IDP metadata via https (see `idp.metadata.path`), specifies the
+Specifies the
 paths to the PEM encoded certificate authority certificates that should be
-trusted. `ssl.certificate_authorities` and `ssl.truststore.path` may not be
+trusted. `ssl.certificate_authorities` and `ssl.truststore.path` cannot be
 used at the same time.
 
 `ssl.keystore.path`::
-If retrieving IDP metadata via https (see `idp.metadata.path`), the path to
+Specifies the path to
 the keystore that contains a private key and certificate.
 Must be either a Java Keystore (jks) or a PKCS#12 file.
-`ssl.key` and `ssl.keystore.path` may not be used at the same time.
+`ssl.key` and `ssl.keystore.path` cannot be used at the same time.
 
 `ssl.keystore.type`::
-The type of the keystore (`ssl.keystore.path`). Must be one of "jks" or "PKCS12".
-Defaults to "PKCS12" if the keystore path ends in ".p12", ".pfx" or "pkcs12",
-otherwise uses "jks"
+The type of the keystore (`ssl.keystore.path`). Must be either `jks` or `PKCS12`.
+If the keystore path ends in ".p12", ".pfx" or "pkcs12", this setting defaults 
+to `PKCS12`. Otherwise, it defaults to `jks`.
 
 `ssl.keystore.password`::
-The password to the keystore (`ssl.keystore.path`).
-May not be used with `ssl.keystore.secure_password`.
+The password to the keystore (`ssl.keystore.path`). This setting cannot be used 
+with `ssl.keystore.secure_password`.
 
 `ssl.keystore.secure_password` (<<secure-settings,Secure>>)::
 The password to the keystore (`ssl.keystore.path`).
-May not be used with `ssl.keystore.password`.
+This setting cannot be used with `ssl.keystore.password`.
 
 `ssl.keystore.key_password`::
 The password for the key in the keystore (`ssl.keystore.path`).
-Defaults to the keystore password.
-May not be used with `ssl.keystore.secure_key_password`.
+Defaults to the keystore password. This setting cannot be used with 
+`ssl.keystore.secure_key_password`.
 
 `ssl.keystore.secure_key_password` (<<secure-settings,Secure>>)::
 The password for the key in the keystore (`ssl.keystore.path`).
-Defaults to the keystore password.
-May not be used with `ssl.keystore.key_password`.
+Defaults to the keystore password. This setting cannot be used with 
+`ssl.keystore.key_password`.
 
 `ssl.truststore.path`::
-If retrieving IDP metadata via https (see `idp.metadata.path`), the path to the
+The path to the
 keystore that contains the certificates to trust.
 Must be either a Java Keystore (jks) or a PKCS#12 file.
-`ssl.certificate_authorities` and `ssl.truststore.path` may not be used at the
+`ssl.certificate_authorities` and `ssl.truststore.path` cannot be used at the
 same time.
 
 `ssl.truststore.type`::
-The type of the truststore (`ssl.truststore.path`). Must be one of "jks" or "PKCS12".
-Defaults to "PKCS12" if the keystore path ends in ".p12", ".pfx" or "pkcs12",
-otherwise uses "jks"
+The type of the truststore (`ssl.truststore.path`). Must be either `jks` or 
+`PKCS12`. If the keystore path ends in ".p12", ".pfx" or "pkcs12", this setting 
+defaults to `PKCS12`. Otherwise, it defaults to `jks`.
 
 `ssl.truststore.password`::
-The password to the truststore (`ssl.truststore.path`).
-May not be used with `ssl.truststore.secure_password`.
+The password to the truststore (`ssl.truststore.path`). This setting cannot be 
+used with `ssl.truststore.secure_password`.
 
 `ssl.truststore.secure_password` (<<secure-settings,Secure>>)::
-The password to the truststore (`ssl.truststore.path`).
-May not be used with `ssl.truststore.password`.
+The password to the truststore (`ssl.truststore.path`). This setting cannot be 
+used with `ssl.truststore.password`.
 
 `ssl.verification_mode`::
-If retrieving IDP metadata via https (see `idp.metadata.path`), one of `full`
-(verify the hostname and the certicate path), `certificate` (verify the
+One of `full`
+(verify the hostname and the certificate path), `certificate` (verify the
 certificate path, but not the hostname) or `none` (perform no verification).
 Defaults to `full`.
 +
@@ -913,20 +1049,45 @@ See <> for a more detailed
 explanation of these values.
 
 `ssl.supported_protocols`::
-If retrieving IDP metadata via https (see `idp.metadata.path`), specifies the
-supported protocols for TLS/SSL.
+Specifies the supported protocols for TLS/SSL.
 
 `ssl.cipher_suites`::
-If retrieving IDP metadata via https (see `idp.metadata.path`), specifies the
+Specifies the
 cipher suites that should be supported.
 
+[float]
+[[load-balancing]]
+===== Load balancing and failover
+
+The `load_balance.type` setting can have the following values: 
+
+* `failover`: The URLs are used in the order in which they are specified.
+The first server that a connection can be established to is used for all
+subsequent connections. If a connection to that server fails, the next server
+in the list is used for subsequent connections.
+* `dns_failover`: In this mode of operation, only a single URL may be specified.
+This URL must contain a DNS name. The system is queried for all IP addresses
+that correspond to this DNS name. Connections to the Active Directory or LDAP
+server are always tried in the order in which they were retrieved. This differs
+from `failover` in that there is no reordering of the list; if a server at the
+beginning of the list has failed, it is still tried for each subsequent
+connection.
+* `round_robin`: Connections continuously iterate through the list of provided
+URLs. If a server is unavailable, iteration continues until a successful
+connection is made.
+* `dns_round_robin`: In this mode of operation, only a single URL may be
+specified. This URL must contain a DNS name. The system is queried for all IP
+addresses that correspond to this DNS name. Connections continuously iterate
+through the list of addresses. If a server is unavailable, iteration continues
+until a successful connection is made.
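+
+For example, a sketch that combines the array form of `url` with round robin
+load balancing (the realm name and hostnames are illustrative):
+
+[source,yaml]
+----------------------------------------
+xpack.security.authc.realms:
+  ldap1:
+    type: ldap
+    url: [ "ldaps://server1:636", "ldaps://server2:636" ]
+    load_balance.type: round_robin
+----------------------------------------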
+
 [float]
 [[ssl-tls-settings]]
-==== Default TLS/SSL Settings
+==== Default TLS/SSL settings
 
 You can configure the following TLS/SSL settings in
 `elasticsearch.yml`. For more information, see
-{xpack-ref}/encrypting-communications.html[Encrypting Communications]. These settings will be used
+{xpack-ref}/encrypting-communications.html[Encrypting communications]. These settings will be used
 for all of {xpack} unless they have been overridden by more specific
 settings such as those for HTTP or Transport.
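+
+For example, a sketch of default key and trust material that more specific HTTP
+or Transport settings would inherit (the file paths are illustrative):
+
+[source,yaml]
+----------------------------------------
+xpack.ssl.key: certs/node01.key
+xpack.ssl.certificate: certs/node01.crt
+xpack.ssl.certificate_authorities: [ "certs/ca.crt" ]
+----------------------------------------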
 
@@ -969,7 +1130,7 @@ Jurisdiction Policy Files_ has been installed, the default value also includes `
 
 [float]
 [[tls-ssl-key-settings]]
-===== Default TLS/SSL Key and Trusted Certificate Settings
+===== Default TLS/SSL key and trusted certificate settings
 
 The following settings are used to specify a private key, certificate, and the
 trusted certificates that should be used when communicating over an SSL/TLS connection.
@@ -979,7 +1140,7 @@ trusted along with the certificate(s) from the <>
 are also available for each transport profile. By default, the settings for a
@@ -1105,9 +1266,9 @@ setting, this would be `transport.profiles.$PROFILE.xpack.security.ssl.key`.
 
 [float]
 [[ip-filtering-settings]]
-==== IP Filtering Settings
+==== IP filtering settings
 
-You can configure the following settings for {xpack-ref}/ip-filtering.html[IP filtering]:
+You can configure the following settings for {xpack-ref}/ip-filtering.html[IP filtering].
 
 `xpack.security.transport.filter.allow`::
 List of IP addresses to allow.
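+
+For example, a sketch that allows a subnet and denies a single host (the
+addresses are illustrative):
+
+[source,yaml]
+----------------------------------------
+xpack.security.transport.filter.allow: "192.168.0.0/24"
+xpack.security.transport.filter.deny: "172.16.0.21"
+----------------------------------------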
diff --git a/x-pack/docs/en/watcher/encrypting-data.asciidoc b/x-pack/docs/en/watcher/encrypting-data.asciidoc
index ca06d9666bb76..166ef6f14d760 100644
--- a/x-pack/docs/en/watcher/encrypting-data.asciidoc
+++ b/x-pack/docs/en/watcher/encrypting-data.asciidoc
@@ -8,7 +8,7 @@ cluster.
 
 To encrypt sensitive data in {watcher}:
 
-. Use the {ref}/syskeygen.html[syskeygen] command to create a system key file.
+. Use the {ref}/syskeygen.html[elasticsearch-syskeygen] command to create a system key file.
 
 . Copy the `system_key` file to all of the nodes in your cluster.
 +
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensingClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensingClient.java
index d2d4461b93108..21381b376925d 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensingClient.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensingClient.java
@@ -41,11 +41,11 @@ public void deleteLicense(DeleteLicenseRequest request, ActionListener<DeleteLicenseResponse> listener) {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequest.java

+    private boolean acknowledge = false;
     private String type;
 
     @Override
@@ -31,25 +32,46 @@ public String getType() {
         return type;
     }
 
+    public PostStartTrialRequest acknowledge(boolean acknowledge) {
+        this.acknowledge = acknowledge;
+        return this;
+    }
+
+    public boolean isAcknowledged() {
+        return acknowledge;
+    }
+
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
         if (in.getVersion().onOrAfter(Version.V_6_3_0)) {
             type = in.readString();
+            acknowledge = in.readBoolean();
         } else {
             type = "trial";
+            acknowledge = true;
         }
     }
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        super.writeTo(out);
         Version version = Version.V_6_3_0;
         if (out.getVersion().onOrAfter(version)) {
+            super.writeTo(out);
             out.writeString(type);
+            out.writeBoolean(acknowledge);
         } else {
-            throw new IllegalArgumentException("All nodes in cluster must be version [" + version
-                    + "] or newer to use `type` parameter. Attempting to write to node with version [" + out.getVersion() + "].");
+            if ("trial".equals(type) == false) {
+                throw new IllegalArgumentException("All nodes in cluster must be version [" + version
+                        + "] or newer to start trial with a different type than 'trial'. Attempting to write to " +
+                        "a node with version [" + out.getVersion() + "] with trial type [" + type + "].");
+            } else if (acknowledge == false) {
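+                // An unacknowledged request cannot be sent to a pre-6.3 node:
+                // such a node has no way to return the acknowledgement messages.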
+                throw new IllegalArgumentException("Request must be acknowledged to send to a node with a version " +
+                        "prior to [" + version + "]. Attempting to send request to node with version [" + out.getVersion() + "] " +
+                        "without acknowledgement.");
+            } else {
+                super.writeTo(out);
+            }
         }
     }
 }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequestBuilder.java
index af381e13517f8..6b0beba171bdd 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequestBuilder.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequestBuilder.java
@@ -14,4 +14,9 @@ class PostStartTrialRequestBuilder extends ActionRequestBuilder<PostStartTrialRequest, PostStartTrialResponse, PostStartTrialRequestBuilder> {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialResponse.java
+    private Map<String, String[]> acknowledgeMessages;
+    private String acknowledgeMessage;
 
     PostStartTrialResponse() {
     }
 
     PostStartTrialResponse(Status status) {
+        this(status, Collections.emptyMap(), null);
+    }
+
+    PostStartTrialResponse(Status status, Map<String, String[]> acknowledgeMessages, String acknowledgeMessage) {
         this.status = status;
+        this.acknowledgeMessages = acknowledgeMessages;
+        this.acknowledgeMessage = acknowledgeMessage;
     }
 
     public Status getStatus() {
@@ -57,10 +76,56 @@ public Status getStatus() {
     @Override
     public void readFrom(StreamInput in) throws IOException {
         status = in.readEnum(Status.class);
+        if (in.getVersion().onOrAfter(Version.V_6_3_0)) {
+            acknowledgeMessage = in.readOptionalString();
+            int size = in.readVInt();
+            Map<String, String[]> acknowledgeMessages = new HashMap<>(size);
+            for (int i = 0; i < size; i++) {
+                String feature = in.readString();
+                int nMessages = in.readVInt();
+                String[] messages = new String[nMessages];
+                for (int j = 0; j < nMessages; j++) {
+                    messages[j] = in.readString();
+                }
+                acknowledgeMessages.put(feature, messages);
+            }
+            this.acknowledgeMessages = acknowledgeMessages;
+        } else {
+            this.acknowledgeMessages = Collections.emptyMap();
+        }
     }
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeEnum(status);
+        Version version = Version.V_6_3_0;
+        if (out.getVersion().onOrAfter(version)) {
+            out.writeEnum(status);
+            out.writeOptionalString(acknowledgeMessage);
+            out.writeVInt(acknowledgeMessages.size());
+            for (Map.Entry<String, String[]> entry : acknowledgeMessages.entrySet()) {
+                out.writeString(entry.getKey());
+                out.writeVInt(entry.getValue().length);
+                for (String message : entry.getValue()) {
+                    out.writeString(message);
+                }
+            }
+        } else {
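+            // Pre-6.3 nodes only understand the original two-value status enum,
+            // so NEED_ACKNOWLEDGEMENT cannot be represented on the wire.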
+            if (status == Status.UPGRADED_TO_TRIAL) {
+                out.writeEnum(Pre63Status.UPGRADED_TO_TRIAL);
+            } else if (status == Status.TRIAL_ALREADY_ACTIVATED) {
+                out.writeEnum(Pre63Status.TRIAL_ALREADY_ACTIVATED);
+            } else {
+                throw new IllegalArgumentException("Starting trial on node with version [" + Version.CURRENT + "] requires " +
+                        "acknowledgement parameter.");
+            }
+        }
+    }
+
+    Map<String, String[]> getAcknowledgementMessages() {
+        return acknowledgeMessages;
+    }
+
+    String getAcknowledgementMessage() {
+        return acknowledgeMessage;
     }
 }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetTrialStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetTrialStatus.java
index ebd43318ff91e..a136f2a88a65d 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetTrialStatus.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetTrialStatus.java
@@ -29,7 +29,7 @@ public class RestGetTrialStatus extends XPackRestHandler {
 
     @Override
     protected RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException {
-        return channel -> client.licensing().prepareGetUpgradeToTrial().execute(
+        return channel -> client.licensing().prepareGetStartTrial().execute(
                 new RestBuilderListener<GetTrialStatusResponse>(channel) {
                     @Override
                     public RestResponse buildResponse(GetTrialStatusResponse response, XContentBuilder builder) throws Exception {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartTrialLicense.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartTrialLicense.java
index 0332eedd69dd1..af738b9aadf7f 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartTrialLicense.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartTrialLicense.java
@@ -16,6 +16,7 @@
 import org.elasticsearch.xpack.core.rest.XPackRestHandler;
 
 import java.io.IOException;
+import java.util.Map;
 
 import static org.elasticsearch.rest.RestRequest.Method.POST;
 
@@ -30,23 +31,36 @@ public class RestPostStartTrialLicense extends XPackRestHandler {
     protected RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException {
         PostStartTrialRequest startTrialRequest = new PostStartTrialRequest();
         startTrialRequest.setType(request.param("type", "trial"));
+        startTrialRequest.acknowledge(request.paramAsBoolean("acknowledge", false));
         return channel -> client.licensing().postStartTrial(startTrialRequest,
                 new RestBuilderListener<PostStartTrialResponse>(channel) {
                     @Override
                     public RestResponse buildResponse(PostStartTrialResponse response, XContentBuilder builder) throws Exception {
                         PostStartTrialResponse.Status status = response.getStatus();
+                        builder.startObject();
+                        builder.field("acknowledged", startTrialRequest.isAcknowledged());
                         if (status.isTrialStarted()) {
-                            builder.startObject()
-                                    .field("trial_was_started", true)
-                                    .field("type", startTrialRequest.getType())
-                                    .endObject();
+                            builder.field("trial_was_started", true);
+                            builder.field("type", startTrialRequest.getType());
                         } else {
-                            builder.startObject()
-                                    .field("trial_was_started", false)
-                                    .field("error_message", status.getErrorMessage())
-                                    .endObject();
+                            builder.field("trial_was_started", false);
+                            builder.field("error_message", status.getErrorMessage());
+                        }
 
+                        Map<String, String[]> acknowledgementMessages = response.getAcknowledgementMessages();
+                        if (acknowledgementMessages.isEmpty() == false) {
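+                            // Only render the acknowledgement section when the
+                            // request still needs to be acknowledged.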
+                            builder.startObject("acknowledge");
+                            builder.field("message", response.getAcknowledgementMessage());
+                            for (Map.Entry<String, String[]> entry : acknowledgementMessages.entrySet()) {
+                                builder.startArray(entry.getKey());
+                                for (String message : entry.getValue()) {
+                                    builder.value(message);
+                                }
+                                builder.endArray();
+                            }
+                            builder.endObject();
                         }
+                        builder.endObject();
                         return new BytesRestResponse(status.getRestStatus(), builder);
                     }
                 });
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java
index 3ca8dbf0eaa4e..355672dedf717 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java
@@ -15,10 +15,23 @@
 import org.elasticsearch.common.Nullable;
 
 import java.time.Clock;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
 import java.util.UUID;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
 
 public class StartTrialClusterTask extends ClusterStateUpdateTask {
 
+    private static final String ACKNOWLEDGEMENT_HEADER = "This API initiates a free 30-day trial for all platinum features. " +
+            "By starting this trial, you agree that it is subject to the terms and conditions at" +
+            " https://www.elastic.co/legal/trial_license/. To begin your free trial, call /start_trial again and specify " +
+            "the \"acknowledge=true\" parameter.";
+
+    private static final Map<String, String[]> ACK_MESSAGES = Collections.singletonMap("security",
+            new String[] {"With a trial license, X-Pack security features are available, but are not enabled by default."});
+
     private final Logger logger;
     private final String clusterName;
     private final PostStartTrialRequest request;
@@ -39,7 +52,10 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS
         LicensesMetaData oldLicensesMetaData = oldState.metaData().custom(LicensesMetaData.TYPE);
         logger.debug("started self generated trial license: {}", oldLicensesMetaData);
 
-        if (oldLicensesMetaData == null || oldLicensesMetaData.isEligibleForTrial()) {
+        if (request.isAcknowledged() == false) {
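+            // Without acknowledgement, return the terms-and-conditions header
+            // and per-feature messages; execute() leaves the state unchanged.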
+            listener.onResponse(new PostStartTrialResponse(PostStartTrialResponse.Status.NEED_ACKNOWLEDGEMENT,
+                    ACK_MESSAGES, ACKNOWLEDGEMENT_HEADER));
+        } else if (oldLicensesMetaData == null || oldLicensesMetaData.isEligibleForTrial()) {
             listener.onResponse(new PostStartTrialResponse(PostStartTrialResponse.Status.UPGRADED_TO_TRIAL));
         } else {
             listener.onResponse(new PostStartTrialResponse(PostStartTrialResponse.Status.TRIAL_ALREADY_ACTIVATED));
@@ -50,7 +66,9 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS
     public ClusterState execute(ClusterState currentState) throws Exception {
         LicensesMetaData currentLicensesMetaData = currentState.metaData().custom(LicensesMetaData.TYPE);
 
-        if (currentLicensesMetaData == null || currentLicensesMetaData.isEligibleForTrial()) {
+        if (request.isAcknowledged() == false) {
+            return currentState;
+        } else if (currentLicensesMetaData == null || currentLicensesMetaData.isEligibleForTrial()) {
             long issueDate = clock.millis();
             MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
             long expiryDate = issueDate + LicenseService.NON_BASIC_SELF_GENERATED_LICENSE_DURATION.getMillis();
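The net effect of the two guards above: a first, unacknowledged call is a no-op that only returns the acknowledgement header and messages, and a second call with acknowledge=true actually generates the trial license. A minimal client-side sketch using the low-level REST client, with the same performRequest signature the updated StartTrialLicenseTests use (host and port are assumptions):

    import org.apache.http.HttpHost;
    import org.elasticsearch.client.Response;
    import org.elasticsearch.client.RestClient;

    public class StartTrialFlowSketch {
        public static void main(String[] args) throws Exception {
            try (RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
                // 1. Without acknowledge: clusterStateProcessed answers NEED_ACKNOWLEDGEMENT
                //    and execute() returns the cluster state unchanged.
                Response first = restClient.performRequest("POST", "/_xpack/license/start_trial");
                System.out.println("first call: " + first.getStatusLine());

                // 2. With acknowledge=true: the 30-day trial license is generated.
                Response second = restClient.performRequest("POST",
                        "/_xpack/license/start_trial?acknowledge=true");
                System.out.println("second call: " + second.getStatusLine());
            }
        }
    }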
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java
index f0bc4b98db1b4..7f4da3fbf1a40 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java
@@ -6,7 +6,6 @@
 package org.elasticsearch.xpack.core.scheduler;
 
 import org.elasticsearch.ElasticsearchParseException;
-import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.joda.time.DateTimeZone;
@@ -29,8 +28,7 @@
 
 
 /**
- *
- * THIS CLASS IS A COPY OF
+ * THIS CLASS IS A FORK OF
  * 
  *     {@code CronExpression}
  * FROM THE QUARTZ PROJECT
@@ -44,63 +42,63 @@
  * Cron expressions are comprised of 6 required fields and one optional field
  * separated by white space. The fields respectively are described as follows:
  *
- * <table cellspacing="8">
+ * <table>
+ * <caption>Fields in cron expressions</caption>
 *
- * <tr>
- * <th align="left">Field Name</th>
- * <th align="left">Allowed Values</th>
- * <th align="left">Allowed Special Characters</th>
- * </tr>
+ * <tr>
+ * <th>Field Name</th>
+ * <th>Allowed Values</th>
+ * <th>Allowed Special Characters</th>
+ * </tr>
 *
- * <tr>
- * <td align="left">Seconds</td>
- * <td align="left">0-59</td>
- * <td align="left">, - * /</td>
- * </tr>
+ * <tr>
+ * <td>Seconds</td>
+ * <td>0-59</td>
+ * <td>, - * /</td>
+ * </tr>
 *
- * <tr>
- * <td align="left">Minutes</td>
- * <td align="left">0-59</td>
- * <td align="left">, - * /</td>
- * </tr>
+ * <tr>
+ * <td>Minutes</td>
+ * <td>0-59</td>
+ * <td>, - * /</td>
+ * </tr>
 *
- * <tr>
- * <td align="left">Hours</td>
- * <td align="left">0-23</td>
- * <td align="left">, - * /</td>
- * </tr>
+ * <tr>
+ * <td>Hours</td>
+ * <td>0-23</td>
+ * <td>, - * /</td>
+ * </tr>
 *
- * <tr>
- * <td align="left">Day-of-month</td>
- * <td align="left">1-31</td>
- * <td align="left">, - * ? / L W</td>
- * </tr>
+ * <tr>
+ * <td>Day-of-month</td>
+ * <td>1-31</td>
+ * <td>, - * ? / L W</td>
+ * </tr>
 *
- * <tr>
- * <td align="left">Month</td>
- * <td align="left">0-11 or JAN-DEC</td>
- * <td align="left">, - * /</td>
- * </tr>
+ * <tr>
+ * <td>Month</td>
+ * <td>0-11 or JAN-DEC</td>
+ * <td>, - * /</td>
+ * </tr>
 *
- * <tr>
- * <td align="left">Day-of-Week</td>
- * <td align="left">1-7 or SUN-SAT</td>
- * <td align="left">, - * ? / L #</td>
- * </tr>
+ * <tr>
+ * <td>Day-of-Week</td>
+ * <td>1-7 or SUN-SAT</td>
+ * <td>, - * ? / L #</td>
+ * </tr>
 *
- * <tr>
- * <td align="left">Year (Optional)</td>
- * <td align="left">empty, 1970-2199</td>
- * <td align="left">, - * /</td>
- * </tr>
+ * <tr>
+ * <td>Year (Optional)</td>
+ * <td>empty, 1970-2199</td>
+ * <td>, - * /</td>
+ * </tr>
 * </table>
 *
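As a quick illustration of the field layout documented in the table above (seconds, minutes, hours, day-of-month, month, day-of-week), here is a minimal sketch of evaluating one of these expressions, assuming Cron keeps the Quartz-style single-String constructor and getNextValidTimeAfter(long) accessor:

    import org.elasticsearch.xpack.core.scheduler.Cron;

    public class CronSketch {
        public static void main(String[] args) {
            // Fire at 10:15:00 on every weekday; "?" means no specific day-of-month.
            Cron cron = new Cron("0 15 10 ? * MON-FRI");
            long nextFireTime = cron.getNextValidTimeAfter(System.currentTimeMillis());
            System.out.println("next fire time (epoch millis): " + nextFireTime);
        }
    }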
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetaDataSerializationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetaDataSerializationTests.java
index b85a3480fa739..f3ed04ed22dfe 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetaDataSerializationTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetaDataSerializationTests.java
@@ -64,7 +64,7 @@ public void testLicenseMetadataParsingDoesNotSwallowOtherMetaData() throws Excep
         License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(2));
         LicensesMetaData licensesMetaData = new LicensesMetaData(license, Version.CURRENT);
         RepositoryMetaData repositoryMetaData = new RepositoryMetaData("repo", "fs", Settings.EMPTY);
-        RepositoriesMetaData repositoriesMetaData = new RepositoriesMetaData(repositoryMetaData);
+        RepositoriesMetaData repositoriesMetaData = new RepositoriesMetaData(Collections.singletonList(repositoryMetaData));
         final MetaData.Builder metaDataBuilder = MetaData.builder();
         if (randomBoolean()) { // random order of insertion
             metaDataBuilder.putCustom(licensesMetaData.getWriteableName(), licensesMetaData);
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java
index d673c4e720452..b7a09d24b1359 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java
@@ -56,33 +56,47 @@ public void testStartTrial() throws Exception {
         assertEquals(200, response.getStatusLine().getStatusCode());
         assertEquals("{\"eligible_to_start_trial\":true}", body);
 
-        String type = randomFrom(LicenseService.VALID_TRIAL_TYPES);
-
-        Response response2 = restClient.performRequest("POST", "/_xpack/license/start_trial?type=" + type);
+        // Test that starting will fail without acknowledgement
+        Response response2 = restClient.performRequest("POST", "/_xpack/license/start_trial");
         String body2 = Streams.copyToString(new InputStreamReader(response2.getEntity().getContent(), StandardCharsets.UTF_8));
         assertEquals(200, response2.getStatusLine().getStatusCode());
-        assertTrue(body2.contains("\"trial_was_started\":true"));
-        assertTrue(body2.contains("\"type\":\"" + type + "\""));
+        assertTrue(body2.contains("\"trial_was_started\":false"));
+        assertTrue(body2.contains("\"error_message\":\"Operation failed: Needs acknowledgement.\""));
+        assertTrue(body2.contains("\"acknowledged\":false"));
 
         assertBusy(() -> {
-            GetLicenseResponse postTrialLicenseResponse = licensingClient.prepareGetLicense().get();
-            assertEquals(type, postTrialLicenseResponse.license().type());
+            GetLicenseResponse getLicenseResponse = licensingClient.prepareGetLicense().get();
+            assertEquals("basic", getLicenseResponse.license().type());
         });
 
-        Response response3 = restClient.performRequest("GET", "/_xpack/license/trial_status");
+        String type = randomFrom(LicenseService.VALID_TRIAL_TYPES);
+
+        Response response3 = restClient.performRequest("POST", "/_xpack/license/start_trial?acknowledge=true&type=" + type);
         String body3 = Streams.copyToString(new InputStreamReader(response3.getEntity().getContent(), StandardCharsets.UTF_8));
         assertEquals(200, response3.getStatusLine().getStatusCode());
-        assertEquals("{\"eligible_to_start_trial\":false}", body3);
+        assertTrue(body3.contains("\"trial_was_started\":true"));
+        assertTrue(body3.contains("\"type\":\"" + type + "\""));
+        assertTrue(body3.contains("\"acknowledged\":true"));
+
+        assertBusy(() -> {
+            GetLicenseResponse postTrialLicenseResponse = licensingClient.prepareGetLicense().get();
+            assertEquals(type, postTrialLicenseResponse.license().type());
+        });
+
+        Response response4 = restClient.performRequest("GET", "/_xpack/license/trial_status");
+        String body4 = Streams.copyToString(new InputStreamReader(response4.getEntity().getContent(), StandardCharsets.UTF_8));
+        assertEquals(200, response4.getStatusLine().getStatusCode());
+        assertEquals("{\"eligible_to_start_trial\":false}", body4);
 
         String secondAttemptType = randomFrom(LicenseService.VALID_TRIAL_TYPES);
 
         ResponseException ex = expectThrows(ResponseException.class,
-                () -> restClient.performRequest("POST", "/_xpack/license/start_trial?type=" + secondAttemptType));
-        Response response4 = ex.getResponse();
-        String body4 = Streams.copyToString(new InputStreamReader(response4.getEntity().getContent(), StandardCharsets.UTF_8));
-        assertEquals(403, response4.getStatusLine().getStatusCode());
-        assertTrue(body4.contains("\"trial_was_started\":false"));
-        assertTrue(body4.contains("\"error_message\":\"Operation failed: Trial was already activated.\""));
+                () -> restClient.performRequest("POST", "/_xpack/license/start_trial?acknowledge=true&type=" + secondAttemptType));
+        Response response5 = ex.getResponse();
+        String body5 = Streams.copyToString(new InputStreamReader(response5.getEntity().getContent(), StandardCharsets.UTF_8));
+        assertEquals(403, response5.getStatusLine().getStatusCode());
+        assertTrue(body5.contains("\"trial_was_started\":false"));
+        assertTrue(body5.contains("\"error_message\":\"Operation failed: Trial was already activated.\""));
     }
 
     public void testInvalidType() throws Exception {
diff --git a/x-pack/plugin/ml/build.gradle b/x-pack/plugin/ml/build.gradle
index af2122d43d9a7..d9d4882b00e1c 100644
--- a/x-pack/plugin/ml/build.gradle
+++ b/x-pack/plugin/ml/build.gradle
@@ -64,6 +64,23 @@ artifacts {
     testArtifacts testJar
 }
 
+task extractNativeLicenses(type: Copy) {
+    dependsOn configurations.nativeBundle
+    into "${buildDir}"
+    from {
+        project.zipTree(configurations.nativeBundle.singleFile)
+    }
+    include 'platform/licenses/**'
+}
+project.afterEvaluate {
+    // Add an extra licenses directory to the combined notices
+    project.tasks.findByName('generateNotice').dependsOn extractNativeLicenses
+    project.tasks.findByName('generateNotice').licensesDir new File("${project.buildDir}/platform/licenses")
+    project.tasks.findByName('generateNotice').outputs.upToDateWhen {
+        extractNativeLicenses.state.upToDate
+    }
+}
+
 run {
     plugin xpackModule('core')
 }
@@ -85,7 +102,7 @@ task internalClusterTest(type: RandomizedTestingTask,
     include '**/*IT.class'
     systemProperty 'es.set.netty.runtime.available.processors', 'false'
 }
-check.dependsOn internalClusterTest
+check.dependsOn internalClusterTest
+internalClusterTest.mustRunAfter test
 
 // also add an "alias" task to make typing on the command line easier
diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java
index 4a096f0ca4a46..098f4190b0e88 100644
--- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java
+++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java
@@ -17,6 +17,7 @@
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.routing.ShardRouting;
@@ -188,6 +189,7 @@ public void testNodesHash() {
 
     @Override
     public void testToXContent() throws IOException {
+        final String clusterUuid = "_cluster";
         final ClusterName clusterName = new ClusterName("_cluster_name");
         final TransportAddress transportAddress = new TransportAddress(TransportAddress.META_ADDRESS, 9300);
         final DiscoveryNode discoveryNode = new DiscoveryNode("_node_name",
@@ -201,6 +203,7 @@ public void testToXContent() throws IOException {
                 Version.V_6_0_0_beta1);
 
         final ClusterState clusterState = ClusterState.builder(clusterName)
+                .metaData(MetaData.builder().clusterUUID(clusterUuid).build())
                 .stateUUID("_state_uuid")
                 .version(12L)
                 .nodes(DiscoveryNodes.builder()
@@ -500,6 +503,7 @@ public void testToXContent() throws IOException {
                 + "\"cluster_state\":{"
                 + "\"nodes_hash\":1314980060,"
                 + "\"status\":\"green\","
+                + "\"cluster_uuid\":\"_cluster\","
                 + "\"version\":12,"
                 + "\"state_uuid\":\"_state_uuid\","
                 + "\"master_node\":\"_node\","
diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/CliRepl.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/CliRepl.java
index f591ced77ec2d..956c62eaa1f9a 100644
--- a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/CliRepl.java
+++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/CliRepl.java
@@ -56,6 +56,11 @@ public void execute() {
                 multiLine.setLength(0);
             }
 
+            // Skip empty commands
+            if (line.isEmpty()) {
+                continue;
+            }
+
             // special case to handle exit
             if (isExit(line)) {
                 cliTerminal.line().em("Bye!").ln();
diff --git a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliReplTests.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliReplTests.java
index 2397418256ae9..31aa4749221fe 100644
--- a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliReplTests.java
+++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliReplTests.java
@@ -38,6 +38,28 @@ public void testBasicCliFunctionality() throws Exception {
         verifyNoMoreInteractions(mockCommand, mockSession);
     }
 
+    /**
+     * Test that empty commands are skipped. This includes commands that are
+     * just new lines.
+     */
+    public void testEmptyNotSent() {
+        CliTerminal cliTerminal = new TestTerminal(
+                ";",
+                "",
+                "",
+                ";",
+                "exit;"
+        );
+
+        CliSession mockSession = mock(CliSession.class);
+        CliCommand mockCommand = mock(CliCommand.class);
+
+        CliRepl cli = new CliRepl(cliTerminal, mockSession, mockCommand);
+        cli.execute();
+
+        verify(mockCommand, times(1)).handle(cliTerminal, mockSession, "logo");
+        verifyNoMoreInteractions(mockSession, mockCommand);
+    }
+
     public void testFatalCliExceptionHandling() throws Exception {
         CliTerminal cliTerminal = new TestTerminal(
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post_start_trial.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post_start_trial.json
index 688afc7b79bbf..a1e5d27da1eda 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post_start_trial.json
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post_start_trial.json
@@ -11,6 +11,10 @@
       "type": {
         "type" : "string",
         "description" : "The type of trial license to generate (default: \"trial\")"
+      },
+      "acknowledge": {
+        "type" : "boolean",
+        "description" : "whether the user has acknowledged acknowledge messages (default: false)"
       }
     }
   },
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/license/20_put_license.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/license/20_put_license.yml
index 98e96318d7a19..9eb3b79fda7a7 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/test/license/20_put_license.yml
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/license/20_put_license.yml
@@ -133,7 +133,8 @@ teardown:
   - do:
       catch: forbidden
-      xpack.license.post_start_trial: {}
+      xpack.license.post_start_trial:
+        acknowledge: true
 
   - match: { trial_was_started: false }
   - match: { error_message: "Operation failed: Trial was already activated." }
@@ -143,6 +144,7 @@ teardown:
       catch: bad_request
       xpack.license.post_start_trial:
         type: "basic"
+        acknowledge: true
 
 ---
 "Can start basic license if do not already have basic":
   - do:
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherIndexingListenerTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherIndexingListenerTests.java
index 9483ee2fc0a47..4ff36d3af9392 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherIndexingListenerTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherIndexingListenerTests.java
@@ -230,7 +230,7 @@ public void testPostIndexDontInvokeForOtherDocuments() throws Exception {
         when(operation.id()).thenReturn("_id");
         when(operation.type()).thenReturn(Watch.DOC_TYPE);
         when(shardId.getIndexName()).thenReturn("anything");
-        when(result.hasFailure()).thenReturn(false);
+        when(result.getResultType()).thenReturn(Engine.Result.Type.SUCCESS);
 
         listener.postIndex(shardId, operation, new ElasticsearchParseException("whatever"));
         verifyZeroInteractions(triggerService);
@@ -744,4 +744,4 @@ private static DiscoveryNode newNode(String nodeId) {
         return new DiscoveryNode(nodeId, ESTestCase.buildNewFakeTransportAddress(), Collections.emptyMap(),
                 new HashSet<>(asList(DiscoveryNode.Role.values())), Version.CURRENT);
     }
-}
\ No newline at end of file
+}
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherMetaDataSerializationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherMetaDataSerializationTests.java
index 50a7fec474935..0556b8535e428 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherMetaDataSerializationTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherMetaDataSerializationTests.java
@@ -47,7 +47,7 @@ public void testWatcherMetadataParsingDoesNotSwallowOtherMetaData() throws Excep
         boolean manuallyStopped = randomBoolean();
         WatcherMetaData watcherMetaData = new WatcherMetaData(manuallyStopped);
         RepositoryMetaData repositoryMetaData = new RepositoryMetaData("repo", "fs", Settings.EMPTY);
-        RepositoriesMetaData repositoriesMetaData = new RepositoriesMetaData(repositoryMetaData);
+        RepositoriesMetaData repositoriesMetaData = new RepositoriesMetaData(Collections.singletonList(repositoryMetaData));
         final MetaData.Builder metaDataBuilder = MetaData.builder();
         if (randomBoolean()) { // random order of insertion
             metaDataBuilder.putCustom(watcherMetaData.getWriteableName(), watcherMetaData);
diff --git a/x-pack/qa/multi-node/build.gradle b/x-pack/qa/multi-node/build.gradle
index bff9d8652b915..6051fc8acd1b5 100644
--- a/x-pack/qa/multi-node/build.gradle
+++ b/x-pack/qa/multi-node/build.gradle
@@ -12,6 +12,7 @@ integTestCluster {
   setting 'xpack.watcher.enabled', 'false'
   setting 'xpack.monitoring.enabled', 'false'
   setting 'xpack.ml.enabled', 'false'
+  setting 'logger.org.elasticsearch.xpack.security.authc', 'TRACE'
   extraConfigFile 'roles.yml', 'roles.yml'
   setupCommand 'setup-test-user', 'bin/elasticsearch-users', 'useradd', 'test-user', '-p', 'x-pack-test-password', '-r', 'test'
   setupCommand 'setup-super-user', 'bin/elasticsearch-users', 'useradd', 'super-user', '-p', 'x-pack-super-password', '-r', 'superuser'
diff --git a/x-pack/qa/smoke-test-plugins-ssl/build.gradle b/x-pack/qa/smoke-test-plugins-ssl/build.gradle
index bc7aa9fd39328..28fd4d2db49ed 100644
--- a/x-pack/qa/smoke-test-plugins-ssl/build.gradle
+++ b/x-pack/qa/smoke-test-plugins-ssl/build.gradle
@@ -1,5 +1,7 @@
 import org.elasticsearch.gradle.LoggedExec
 import org.elasticsearch.gradle.MavenFilteringHack
+import org.elasticsearch.gradle.plugin.MetaPluginBuildPlugin
+import org.elasticsearch.gradle.plugin.PluginBuildPlugin
 import org.elasticsearch.gradle.test.NodeInfo
 
 import javax.net.ssl.HttpsURLConnection
@@ -160,9 +162,9 @@ integTestCluster.dependsOn(importClientCertificateInNodeKeyStore, importNodeCert
 
 ext.pluginsCount = 0
-project.rootProject.subprojects.findAll { it.path.startsWith(':plugins:') }.each { subproj ->
+project(':plugins').getChildProjects().each { pluginName, pluginProject ->
   // need to get a non-decorated project object, so must re-lookup the project by path
-  integTestCluster.plugin(subproj.path)
+  integTestCluster.plugin(pluginProject.path)
   pluginsCount += 1
 }
diff --git a/x-pack/qa/smoke-test-plugins/build.gradle b/x-pack/qa/smoke-test-plugins/build.gradle
index 8c232bc5f3a51..207fa8204db00 100644
--- a/x-pack/qa/smoke-test-plugins/build.gradle
+++ b/x-pack/qa/smoke-test-plugins/build.gradle
@@ -1,4 +1,6 @@
 import org.elasticsearch.gradle.MavenFilteringHack
+import org.elasticsearch.gradle.plugin.MetaPluginBuildPlugin
+import org.elasticsearch.gradle.plugin.PluginBuildPlugin
 
 apply plugin: 'elasticsearch.standalone-rest-test'
 apply plugin: 'elasticsearch.rest-test'
@@ -8,9 +10,9 @@ dependencies {
 }
 
 ext.pluginsCount = 0
-project.rootProject.subprojects.findAll { it.path.startsWith(':plugins:') }.each { subproj ->
+project(':plugins').getChildProjects().each { pluginName, pluginProject ->
   // need to get a non-decorated project object, so must re-lookup the project by path
-  integTestCluster.plugin(subproj.path)
+  integTestCluster.plugin(pluginProject.path)
   pluginsCount += 1
 }
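One detail worth calling out from the WatcherIndexingListenerTests change above: it tracks the engine API migration from Engine.Result#hasFailure() to Engine.Result#getResultType(). A hedged Mockito sketch of the new-style stubbing (the Engine.IndexResult mock and standalone main are assumptions; the test's own scaffolding differs):

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import org.elasticsearch.index.engine.Engine;

    public class ResultTypeStubSketch {
        public static void main(String[] args) {
            Engine.IndexResult result = mock(Engine.IndexResult.class);
            // Old style: when(result.hasFailure()).thenReturn(false);
            // New style: express success through the result-type enum instead.
            when(result.getResultType()).thenReturn(Engine.Result.Type.SUCCESS);
            System.out.println(result.getResultType()); // SUCCESS
        }
    }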