Commit 3f6434c
Merge branch 'master' into ccr
* master:
  Reduce CLI scripts to one-liners (#30759)
  SQL: Preserve scoring in bool queries (#30730)
  QA: Switch rolling upgrade to 3 nodes (#30728)
  [TEST] Enable DEBUG logging on testAutoQueueSizingWithMax
  [ML] Don't install empty ML metadata on startup (#30751)
  Add assertion on removing copy_settings (#30748)
  bump lucene version for 6_3_0
  [DOCS] Mark painless execute api as experimental (#30710)
  disable annotation processor for docs (#30610)
  Add more script contexts (#30721)
  Fix default shards count in create index docs (#30747)
  Mute testCorruptFileThenSnapshotAndRestore
dnhatn committed May 21, 2018
2 parents 3245e78 + 3ce2297 commit 3f6434c
Showing 74 changed files with 530 additions and 694 deletions.

@@ -24,6 +24,7 @@ import org.elasticsearch.gradle.BuildPlugin
import org.gradle.api.Plugin
import org.gradle.api.Project
import org.gradle.api.plugins.JavaBasePlugin
import org.gradle.api.tasks.compile.JavaCompile

/**
* Configures the build to compile against Elasticsearch's test framework and
@@ -49,5 +50,12 @@ public class StandaloneTestPlugin implements Plugin<Project> {
test.testClassesDir project.sourceSets.test.output.classesDir
test.mustRunAfter(project.precommit)
project.check.dependsOn(test)

project.tasks.withType(JavaCompile) {
// Disable annotation processing unless a processor was explicitly configured; this behavior will be the default in Gradle 5.0
if (options.compilerArgs.contains("-processor") == false) {
options.compilerArgs << '-proc:none'
}
}
}
}
22 changes: 22 additions & 0 deletions distribution/src/bin/elasticsearch-cli
@@ -0,0 +1,22 @@
#!/bin/bash

set -e -o pipefail

source "`dirname "$0"`"/elasticsearch-env

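# ES_ADDITIONAL_SOURCES is a semicolon-separated list of additional scripts in this directory to source before launching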
IFS=';' read -r -a additional_sources <<< "$ES_ADDITIONAL_SOURCES"
for additional_source in "${additional_sources[@]}"
do
source "`dirname "$0"`"/$additional_source
done

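# run the class named by the first argument on the Elasticsearch classpath; the remaining arguments are passed through to it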
exec \
"$JAVA" \
$ES_JAVA_OPTS \
-Des.path.home="$ES_HOME" \
-Des.path.conf="$ES_PATH_CONF" \
-Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
-Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
-cp "$ES_CLASSPATH" \
$1 \
"${@:2}"
11 changes: 1 addition & 10 deletions distribution/src/bin/elasticsearch-keystore
@@ -1,14 +1,5 @@
#!/bin/bash

source "`dirname "$0"`"/elasticsearch-env

exec \
"$JAVA" \
$ES_JAVA_OPTS \
-Des.path.home="$ES_HOME" \
-Des.path.conf="$ES_PATH_CONF" \
-Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
-Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
-cp "$ES_CLASSPATH" \
"`dirname "$0"`"/elasticsearch-cli \
org.elasticsearch.common.settings.KeyStoreCli \
"$@"
11 changes: 1 addition & 10 deletions distribution/src/bin/elasticsearch-plugin
@@ -1,14 +1,5 @@
#!/bin/bash

source "`dirname "$0"`"/elasticsearch-env

exec \
"$JAVA" \
$ES_JAVA_OPTS \
-Des.path.home="$ES_HOME" \
-Des.path.conf="$ES_PATH_CONF" \
-Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
-Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
-cp "$ES_CLASSPATH" \
"`dirname "$0"`"/elasticsearch-cli \
org.elasticsearch.plugins.PluginCli \
"$@"
11 changes: 1 addition & 10 deletions distribution/src/bin/elasticsearch-translog
@@ -1,14 +1,5 @@
#!/bin/bash

source "`dirname "$0"`"/elasticsearch-env

exec \
"$JAVA" \
$ES_JAVA_OPTS \
-Des.path.home="$ES_HOME" \
-Des.path.conf="$ES_PATH_CONF" \
-Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
-Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
-cp "$ES_CLASSPATH" \
"`dirname "$0"`"/elasticsearch-cli \
org.elasticsearch.index.translog.TranslogToolCli \
"$@"
2 changes: 2 additions & 0 deletions docs/painless/painless-execute-script.asciidoc
@@ -1,6 +1,8 @@
[[painless-execute-api]]
=== Painless execute API

experimental[The painless execute api is new and the request / response format may change in a breaking way in the future]

The Painless execute API allows an arbitrary script to be executed and a result to be returned.

[[painless-execute-api-parameters]]
2 changes: 1 addition & 1 deletion docs/reference/indices/create-index.asciidoc
@@ -25,7 +25,7 @@ PUT twitter
}
--------------------------------------------------
// CONSOLE
<1> Default for `number_of_shards` is 5
<1> Default for `number_of_shards` is 1
<2> Default for `number_of_replicas` is 1 (ie one replica for each primary shard)

The above second curl example shows how an index called `twitter` can be

@@ -54,7 +54,7 @@ public String getType() {

@Override
public <T> T compile(String scriptName, String scriptSource, ScriptContext<T> context, Map<String, String> params) {
if (context.equals(SearchScript.CONTEXT) == false) {
if (context.equals(SearchScript.SCRIPT_SCORE_CONTEXT) == false) {
throw new IllegalArgumentException(getType() + " scripts cannot be used for context [" + context.name + "]");
}
// we use the script "source" as the script identifier
88 changes: 61 additions & 27 deletions qa/rolling-upgrade/build.gradle
@@ -30,6 +30,26 @@ task bwcTest {
}

for (Version version : bwcVersions.wireCompatible) {
/*
* The goal here is to:
* <ul>
* <li>start three nodes on the old version
* <li>run tests with systemProperty 'tests.rest.suite', 'old_cluster'
* <li>shut down one node
* <li>start a node with the new version
* <li>run tests with systemProperty 'tests.rest.suite', 'mixed_cluster'
* <li>shut down one node on the old version
* <li>start a node with the new version
* <li>run tests with systemProperty 'tests.rest.suite', 'mixed_cluster' again
* <li>shut down the last node with the old version
* <li>start a node with the new version
* <li>run tests with systemProperty 'tests.rest.suite', 'upgraded_cluster'
* <li>shut down the entire cluster
* </ul>
*
* Be careful: gradle dry run spits out tasks in the wrong order but,
* strangely, running the tasks works properly.
*/
String baseName = "v${version}"

Task oldClusterTest = tasks.create(name: "${baseName}#oldClusterTest", type: RestIntegTestTask) {
@@ -39,8 +59,8 @@ for (Version version : bwcVersions.wireCompatible) {
Object extension = extensions.findByName("${baseName}#oldClusterTestCluster")
configure(extensions.findByName("${baseName}#oldClusterTestCluster")) {
bwcVersion = version
numBwcNodes = 2
numNodes = 2
numBwcNodes = 3
numNodes = 3
clusterName = 'rolling-upgrade'
setting 'repositories.url.allowed_urls', 'http://snapshot.test*'
if (version.onOrAfter('5.3.0')) {
@@ -53,43 +73,57 @@ for (Version version : bwcVersions.wireCompatible) {
systemProperty 'tests.rest.suite', 'old_cluster'
}

Task mixedClusterTest = tasks.create(name: "${baseName}#mixedClusterTest", type: RestIntegTestTask)
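/*
 * Shared setup for each partially- or fully-upgraded test cluster: `name` is the cluster
 * extension to configure, `lastRunner` the task it must run after, `stopNode` the index of
 * the old-version node being replaced (the new node reuses its data directory), and
 * `unicastSeed` a closure returning the transport URI of a node that is already running.
 */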
Closure configureUpgradeCluster = {String name, Task lastRunner, int stopNode, Closure unicastSeed ->
configure(extensions.findByName("${baseName}#${name}")) {
dependsOn lastRunner, "${baseName}#oldClusterTestCluster#node${stopNode}.stop"
clusterName = 'rolling-upgrade'
unicastTransportUri = { seedNode, node, ant -> unicastSeed() }
minimumMasterNodes = { 3 }
/* Override the data directory so the new node always gets the node we
* just stopped's data directory. */
dataDir = { nodeNumber -> oldClusterTest.nodes[stopNode].dataDir }
setting 'repositories.url.allowed_urls', 'http://snapshot.test*'
}
}

configure(extensions.findByName("${baseName}#mixedClusterTestCluster")) {
dependsOn oldClusterTestRunner, "${baseName}#oldClusterTestCluster#node1.stop"
clusterName = 'rolling-upgrade'
unicastTransportUri = { seedNode, node, ant -> oldClusterTest.nodes.get(0).transportUri() }
minimumMasterNodes = { 2 }
/* Override the data directory so the new node always gets the node we
* just stopped's data directory. */
dataDir = { nodeNumber -> oldClusterTest.nodes[1].dataDir }
setting 'repositories.url.allowed_urls', 'http://snapshot.test*'
Task oneThirdUpgradedTest = tasks.create(name: "${baseName}#oneThirdUpgradedTest", type: RestIntegTestTask)

configureUpgradeCluster("oneThirdUpgradedTestCluster", oldClusterTestRunner,
0, { oldClusterTest.nodes.get(1).transportUri() })

Task oneThirdUpgradedTestRunner = tasks.getByName("${baseName}#oneThirdUpgradedTestRunner")
oneThirdUpgradedTestRunner.configure {
systemProperty 'tests.rest.suite', 'mixed_cluster'
systemProperty 'tests.first_round', 'true'
finalizedBy "${baseName}#oldClusterTestCluster#node1.stop"
}

Task mixedClusterTestRunner = tasks.getByName("${baseName}#mixedClusterTestRunner")
mixedClusterTestRunner.configure {
Task twoThirdsUpgradedTest = tasks.create(name: "${baseName}#twoThirdsUpgradedTest", type: RestIntegTestTask)

configureUpgradeCluster("twoThirdsUpgradedTestCluster", oneThirdUpgradedTestRunner,
1, { oneThirdUpgradedTest.nodes.get(0).transportUri() })

Task twoThirdsUpgradedTestRunner = tasks.getByName("${baseName}#twoThirdsUpgradedTestRunner")
twoThirdsUpgradedTestRunner.configure {
systemProperty 'tests.rest.suite', 'mixed_cluster'
finalizedBy "${baseName}#oldClusterTestCluster#node0.stop"
systemProperty 'tests.first_round', 'false'
finalizedBy "${baseName}#oldClusterTestCluster#node2.stop"
}

Task upgradedClusterTest = tasks.create(name: "${baseName}#upgradedClusterTest", type: RestIntegTestTask)

configure(extensions.findByName("${baseName}#upgradedClusterTestCluster")) {
dependsOn mixedClusterTestRunner, "${baseName}#oldClusterTestCluster#node0.stop"
clusterName = 'rolling-upgrade'
unicastTransportUri = { seedNode, node, ant -> mixedClusterTest.nodes.get(0).transportUri() }
minimumMasterNodes = { 2 }
/* Override the data directory so the new node always gets the node we
* just stopped's data directory. */
dataDir = { nodeNumber -> oldClusterTest.nodes[0].dataDir}
setting 'repositories.url.allowed_urls', 'http://snapshot.test*'
}
configureUpgradeCluster("upgradedClusterTestCluster", twoThirdsUpgradedTestRunner,
2, { twoThirdsUpgradedTest.nodes.get(0).transportUri() })

Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner")
upgradedClusterTestRunner.configure {
systemProperty 'tests.rest.suite', 'upgraded_cluster'
// only need to kill the mixed cluster tests node here because we explicitly told it to not stop nodes upon completion
finalizedBy "${baseName}#mixedClusterTestCluster#stop"
/*
* Force stopping all the upgraded nodes after the test runner
* so they are alive during the test.
*/
finalizedBy "${baseName}#oneThirdUpgradedTestCluster#stop"
finalizedBy "${baseName}#twoThirdsUpgradedTestCluster#stop"
}

Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") {
@@ -0,0 +1,91 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.upgrades;

import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.elasticsearch.Version;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.client.Response;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.test.rest.yaml.ObjectPath;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Future;
import java.util.function.Predicate;

import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength;
import static java.util.Collections.emptyMap;
import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING;
import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING;
import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.notNullValue;

public abstract class AbstractRollingTestCase extends ESRestTestCase {
protected enum ClusterType {
OLD,
MIXED,
UPGRADED;

public static ClusterType parse(String value) {
switch (value) {
case "old_cluster":
return OLD;
case "mixed_cluster":
return MIXED;
case "upgraded_cluster":
return UPGRADED;
default:
throw new AssertionError("unknown cluster type: " + value);
}
}
}

protected static final ClusterType CLUSTER_TYPE = ClusterType.parse(System.getProperty("tests.rest.suite"));

@Override
protected final boolean preserveIndicesUponCompletion() {
return true;
}

@Override
protected final boolean preserveReposUponCompletion() {
return true;
}

@Override
protected final Settings restClientSettings() {
return Settings.builder().put(super.restClientSettings())
// increase the timeout here to 90 seconds to handle long waits for a green
// cluster health. the waits for green need to be longer than a minute to
// account for delayed shards
.put(ESRestTestCase.CLIENT_RETRY_TIMEOUT, "90s")
.put(ESRestTestCase.CLIENT_SOCKET_TIMEOUT, "90s")
.build();
}
}
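
For orientation, here is a minimal sketch of how a concrete rolling-upgrade test would build on this base class. It is not part of this commit and the class and test names are illustrative: the Gradle runners above set tests.rest.suite (and tests.first_round for the two mixed rounds), AbstractRollingTestCase parses that into CLUSTER_TYPE, and each test branches on it.

package org.elasticsearch.upgrades;

public class ExampleRollingUpgradeIT extends AbstractRollingTestCase {

    public void testDataSurvivesRollingUpgrade() throws Exception {
        switch (CLUSTER_TYPE) {
            case OLD:
                // runs against the three old-version nodes: create indices and seed documents here
                break;
            case MIXED:
                // runs twice, once per mixed round; System.getProperty("tests.first_round")
                // distinguishes the one-third-upgraded round from the two-thirds-upgraded round
                break;
            case UPGRADED:
                // runs once every node is on the new version: verify the seeded data survived
                break;
            default:
                throw new AssertionError("unknown cluster type: " + CLUSTER_TYPE);
        }
    }
}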