Merge branch 'master' into disable-package-distribution-integ-tests
* master:
  Fix index with unknown setting test
  Remove internal channel tracking in transports (elastic#27711)
  Improve error msg when a field name contains only white spaces (elastic#27709)
  Do not open indices with broken settings
  Set ACK timeout on indices service test
  Implement byte array reusage in `NioTransport` (elastic#27696)
  [TEST] Remove leftover ES temp directories before Vagrant tests (elastic#27722)
  Cleanup split strings by comma method
  Remove unused import from AliasResolveRoutingIT
  Add read timeouts to http module (elastic#27713)
  Fix routing with leading or trailing whitespace
  remove await fix from FullClusterRestartIT.testRecovery
  Add missing 's' to tmpdir name (elastic#27721)
  [Issue-27716]: CONTRIBUTING.md IntelliJ configurations settings are confusing. (elastic#27717)
  [TEST] Now actually wait for merges
jasontedor committed Dec 9, 2017
2 parents 8bedef4 + 8c8b1dc commit cbce130
Showing 80 changed files with 749 additions and 418 deletions.
4 changes: 3 additions & 1 deletion CONTRIBUTING.md
@@ -119,11 +119,13 @@ Alternatively, `idea.no.launcher=true` can be set in the
[`idea.properties`](https://www.jetbrains.com/help/idea/file-idea-properties.html)
file which can be accessed under Help > Edit Custom Properties (this will require a
restart of IDEA). For IDEA 2017.3 and above, in addition to the JVM option, you will need to go to
`Run->Edit Configurations...` and change the value for the `Shorten command line` setting from
`Run->Edit Configurations->...->Defaults->JUnit` and change the value for the `Shorten command line` setting from
`user-local default: none` to `classpath file`. You may also need to [remove `ant-javafx.jar` from your
classpath](https://github.com/elastic/elasticsearch/issues/14348) if that is
reported as a source of jar hell.

To run an instance of Elasticsearch from the source code, run `gradle run`.

The Elasticsearch codebase makes heavy use of Java `assert`s and the
test runner requires that assertions be enabled within the JVM. This
can be accomplished by passing the flag `-ea` to the JVM on startup.
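As a minimal standalone illustration (not part of this commit; the class name and message are placeholders), the sketch below only trips its assertion when the JVM is launched with `java -ea AssertionsDemo`; without the flag the `assert` statement is compiled in but never evaluated, which is why the test runner insists on the flag.

// Run once with `java -ea AssertionsDemo` and once without -ea to compare.
public class AssertionsDemo {
    public static void main(String[] args) {
        System.out.println("assertions enabled: " + AssertionsDemo.class.desiredAssertionStatus());
        // Throws AssertionError only when assertions are enabled; otherwise this line is a no-op.
        assert args.length > 0 : "expected at least one argument";
    }
}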
5 changes: 5 additions & 0 deletions Vagrantfile
@@ -307,4 +307,9 @@ Defaults env_keep += "BATS_ARCHIVES"
SUDOERS_VARS
chmod 0440 /etc/sudoers.d/elasticsearch_vars
SHELL
# This prevents leftovers from previous tests using the
# same VM from messing up the current test
config.vm.provision "clean_tmp", run: "always", type: "shell", inline: <<-SHELL
rm -rf /tmp/elasticsearch*
SHELL
end
1 change: 0 additions & 1 deletion buildSrc/src/main/resources/checkstyle_suppressions.xml
@@ -658,7 +658,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]recovery[/\\]RelocationIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]recovery[/\\]TruncatedRecoveryIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]BytesRestResponseTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]routing[/\\]AliasResolveRoutingIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]routing[/\\]AliasRoutingIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]routing[/\\]SimpleRoutingIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]FileScriptTests.java" checks="LineLength" />
@@ -43,6 +43,7 @@
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.node.InternalSettingsPreparer;
@@ -169,11 +170,12 @@ private static ClientTemplate buildTemplate(Settings providedSettings, Settings
CircuitBreakerService circuitBreakerService = Node.createCircuitBreakerService(settingsModule.getSettings(),
settingsModule.getClusterSettings());
resourcesToClose.add(circuitBreakerService);
BigArrays bigArrays = new BigArrays(settings, circuitBreakerService);
PageCacheRecycler pageCacheRecycler = new PageCacheRecycler(settings);
BigArrays bigArrays = new BigArrays(pageCacheRecycler, circuitBreakerService);
resourcesToClose.add(bigArrays);
modules.add(settingsModule);
NetworkModule networkModule = new NetworkModule(settings, true, pluginsService.filterPlugins(NetworkPlugin.class), threadPool,
bigArrays, circuitBreakerService, namedWriteableRegistry, xContentRegistry, networkService, null);
bigArrays, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, xContentRegistry, networkService, null);
final Transport transport = networkModule.getTransportSupplier().get();
final TransportService transportService = new TransportService(settings, transport, threadPool,
networkModule.getTransportInterceptor(),
@@ -27,6 +27,7 @@
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
@@ -58,7 +59,7 @@ private AliasMetaData(String alias, CompressedXContent filter, String indexRouti
this.indexRouting = indexRouting;
this.searchRouting = searchRouting;
if (searchRouting != null) {
searchRoutingValues = Collections.unmodifiableSet(Strings.splitStringByCommaToSet(searchRouting));
searchRoutingValues = Collections.unmodifiableSet(Sets.newHashSet(Strings.splitStringByCommaToArray(searchRouting)));
} else {
searchRoutingValues = emptySet();
}
@@ -186,7 +187,7 @@ public AliasMetaData(StreamInput in) throws IOException {
}
if (in.readBoolean()) {
searchRouting = in.readString();
searchRoutingValues = Collections.unmodifiableSet(Strings.splitStringByCommaToSet(searchRouting));
searchRoutingValues = Collections.unmodifiableSet(Sets.newHashSet(Strings.splitStringByCommaToArray(searchRouting)));
} else {
searchRouting = null;
searchRoutingValues = emptySet();
@@ -31,6 +31,7 @@
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.indices.IndexClosedException;
@@ -358,6 +359,7 @@ public Map<String, Set<String>> resolveSearchRouting(ClusterState state, @Nullab
resolvedExpressions = expressionResolver.resolve(context, resolvedExpressions);
}

// TODO: it appears that this can never be true?
if (isAllIndices(resolvedExpressions)) {
return resolveSearchRoutingAllIndices(state.metaData(), routing);
}
@@ -367,7 +369,7 @@ public Map<String, Set<String>> resolveSearchRouting(ClusterState state, @Nullab
// List of indices that don't require any routing
Set<String> norouting = new HashSet<>();
if (routing != null) {
paramRouting = Strings.splitStringByCommaToSet(routing);
paramRouting = Sets.newHashSet(Strings.splitStringByCommaToArray(routing));
}

for (String expression : resolvedExpressions) {
@@ -442,9 +444,9 @@ public Map<String, Set<String>> resolveSearchRouting(ClusterState state, @Nullab
/**
* Sets the same routing for all indices
*/
private Map<String, Set<String>> resolveSearchRoutingAllIndices(MetaData metaData, String routing) {
public Map<String, Set<String>> resolveSearchRoutingAllIndices(MetaData metaData, String routing) {
if (routing != null) {
Set<String> r = Strings.splitStringByCommaToSet(routing);
Set<String> r = Sets.newHashSet(Strings.splitStringByCommaToArray(routing));
Map<String, Set<String>> routings = new HashMap<>();
String[] concreteIndices = metaData.getConcreteAllIndices();
for (String index : concreteIndices) {
@@ -82,7 +82,6 @@ public MetaDataIndexUpgradeService(Settings settings, NamedXContentRegistry xCon
public IndexMetaData upgradeIndexMetaData(IndexMetaData indexMetaData, Version minimumIndexCompatibilityVersion) {
// Throws an exception if there are too-old segments:
if (isUpgraded(indexMetaData)) {
assert indexMetaData == archiveBrokenIndexSettings(indexMetaData) : "all settings must have been upgraded before";
return indexMetaData;
}
checkSupportedVersion(indexMetaData, minimumIndexCompatibilityVersion);
119 changes: 36 additions & 83 deletions core/src/main/java/org/elasticsearch/common/Strings.java
@@ -41,6 +41,7 @@
import java.util.Set;
import java.util.StringTokenizer;
import java.util.TreeSet;
import java.util.function.Supplier;

import static java.util.Collections.unmodifiableSet;
import static org.elasticsearch.common.util.set.Sets.newHashSet;
@@ -410,62 +411,27 @@ public static String[] toStringArray(Collection<String> collection) {
return collection.toArray(new String[collection.size()]);
}

public static Set<String> splitStringByCommaToSet(final String s) {
return splitStringToSet(s, ',');
}

public static String[] splitStringByCommaToArray(final String s) {
if (s == null || s.isEmpty()) return Strings.EMPTY_ARRAY;
else return s.split(",");
/**
* Tokenize the specified string by commas to a set, trimming whitespace and ignoring empty tokens.
*
* @param s the string to tokenize
* @return the set of tokens
*/
public static Set<String> tokenizeByCommaToSet(final String s) {
if (s == null) return Collections.emptySet();
return tokenizeToCollection(s, ",", HashSet::new);
}

/**
* A convenience method for splitting a delimited string into
* a set and trimming leading and trailing whitespace from all
* split strings.
* Split the specified string by commas to an array.
*
* @param s the string to split
* @param c the delimiter to split on
* @return the set of split strings
*/
public static Set<String> splitStringToSet(final String s, final char c) {
if (s == null || s.isEmpty()) {
return Collections.emptySet();
}
final char[] chars = s.toCharArray();
int count = 1;
for (final char x : chars) {
if (x == c) {
count++;
}
}
final Set<String> result = new HashSet<>(count);
final int len = chars.length;
int start = 0; // starting index in chars of the current substring.
int pos = 0; // current index in chars.
int end = 0; // the position of the end of the current token
for (; pos < len; pos++) {
if (chars[pos] == c) {
int size = end - start;
if (size > 0) { // only add non empty strings
result.add(new String(chars, start, size));
}
start = pos + 1;
end = start;
} else if (Character.isWhitespace(chars[pos])) {
if (start == pos) {
// skip over preceding whitespace
start++;
}
} else {
end = pos + 1;
}
}
int size = end - start;
if (size > 0) {
result.add(new String(chars, start, size));
}
return result;
* @return the array of split values
* @see String#split(String)
*/
public static String[] splitStringByCommaToArray(final String s) {
if (s == null || s.isEmpty()) return Strings.EMPTY_ARRAY;
else return s.split(",");
}

/**
@@ -499,56 +465,43 @@ public static String[] split(String toSplit, String delimiter) {
* tokens. A delimiter is always a single character; for multi-character
* delimiters, consider using <code>delimitedListToStringArray</code>
*
* @param str the String to tokenize
* @param s the String to tokenize
* @param delimiters the delimiter characters, assembled as String
* (each of those characters is individually considered as delimiter).
* @return an array of the tokens
* @see java.util.StringTokenizer
* @see java.lang.String#trim()
* @see #delimitedListToStringArray
*/
public static String[] tokenizeToStringArray(String str, String delimiters) {
return tokenizeToStringArray(str, delimiters, true, true);
public static String[] tokenizeToStringArray(final String s, final String delimiters) {
return toStringArray(tokenizeToCollection(s, delimiters, ArrayList::new));
}

/**
* Tokenize the given String into a String array via a StringTokenizer.
* <p>The given delimiters string is supposed to consist of any number of
* delimiter characters. Each of those characters can be used to separate
* tokens. A delimiter is always a single character; for multi-character
* delimiters, consider using <code>delimitedListToStringArray</code>
* Tokenizes the specified string to a collection using the specified delimiters as the token delimiters. This method trims whitespace
* from tokens and ignores empty tokens.
*
* @param str the String to tokenize
* @param delimiters the delimiter characters, assembled as String
* (each of those characters is individually considered as delimiter)
* @param trimTokens trim the tokens via String's <code>trim</code>
* @param ignoreEmptyTokens omit empty tokens from the result array
* (only applies to tokens that are empty after trimming; StringTokenizer
* will not consider subsequent delimiters as token in the first place).
* @return an array of the tokens (<code>null</code> if the input String
* was <code>null</code>)
* @param s the string to tokenize.
* @param delimiters the token delimiters
* @param supplier a collection supplier
* @param <T> the type of the collection
* @return the tokens
* @see java.util.StringTokenizer
* @see java.lang.String#trim()
* @see #delimitedListToStringArray
*/
public static String[] tokenizeToStringArray(
String str, String delimiters, boolean trimTokens, boolean ignoreEmptyTokens) {

if (str == null) {
private static <T extends Collection<String>> T tokenizeToCollection(
final String s, final String delimiters, final Supplier<T> supplier) {
if (s == null) {
return null;
}
StringTokenizer st = new StringTokenizer(str, delimiters);
List<String> tokens = new ArrayList<>();
while (st.hasMoreTokens()) {
String token = st.nextToken();
if (trimTokens) {
token = token.trim();
}
if (!ignoreEmptyTokens || token.length() > 0) {
final StringTokenizer tokenizer = new StringTokenizer(s, delimiters);
final T tokens = supplier.get();
while (tokenizer.hasMoreTokens()) {
final String token = tokenizer.nextToken().trim();
if (token.length() > 0) {
tokens.add(token);
}
}
return toStringArray(tokens);
return tokens;
}

/**
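A hedged usage sketch of the two helpers that remain after this cleanup (it assumes an Elasticsearch core jar providing the `Strings` class above is on the classpath; the demo class and input value are illustrative): tokenizing trims whitespace and drops empty tokens, while splitting returns the raw comma-separated values verbatim, which appears to be the distinction the routing-whitespace changes in this merge build on.

import java.util.Arrays;
import java.util.Set;

import org.elasticsearch.common.Strings;

public class SplitVsTokenizeDemo {
    public static void main(String[] args) {
        String routing = " a ,, b ,  ";

        // Trims each token and ignores empty ones: yields exactly the two values "a" and "b".
        Set<String> tokens = Strings.tokenizeByCommaToSet(routing);
        System.out.println(tokens);

        // Plain String#split(","): keeps whitespace and interior empty tokens,
        // yielding the four elements " a ", "", " b ", "  ".
        String[] raw = Strings.splitStringByCommaToArray(routing);
        System.out.println(Arrays.toString(raw));
    }
}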
@@ -34,6 +34,7 @@
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.http.HttpServerTransport;
@@ -107,6 +108,7 @@ public final class NetworkModule {
*/
public NetworkModule(Settings settings, boolean transportClient, List<NetworkPlugin> plugins, ThreadPool threadPool,
BigArrays bigArrays,
PageCacheRecycler pageCacheRecycler,
CircuitBreakerService circuitBreakerService,
NamedWriteableRegistry namedWriteableRegistry,
NamedXContentRegistry xContentRegistry,
@@ -121,9 +123,9 @@ public NetworkModule(Settings settings, boolean transportClient, List<NetworkPlu
registerHttpTransport(entry.getKey(), entry.getValue());
}
}
Map<String, Supplier<Transport>> httpTransportFactory = plugin.getTransports(settings, threadPool, bigArrays,
Map<String, Supplier<Transport>> transportFactory = plugin.getTransports(settings, threadPool, bigArrays, pageCacheRecycler,
circuitBreakerService, namedWriteableRegistry, networkService);
for (Map.Entry<String, Supplier<Transport>> entry : httpTransportFactory.entrySet()) {
for (Map.Entry<String, Supplier<Transport>> entry : transportFactory.entrySet()) {
registerTransport(entry.getKey(), entry.getValue());
}
List<TransportInterceptor> transportInterceptors = plugin.getTransportInterceptors(namedWriteableRegistry,
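A hedged sketch of the plugin-facing side of this change: the parameter order below follows the `plugin.getTransports(...)` call in the hunk above, but the class name, the "my-transport" key, and the factory method are illustrative, and the exact `NetworkPlugin` interface shape should be checked against the source. The point is that custom transports now receive a `PageCacheRecycler` and can reuse byte pages instead of allocating fresh buffers per channel.

import java.util.Collections;
import java.util.Map;
import java.util.function.Supplier;

import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.plugins.NetworkPlugin;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.Transport;

public class MyNetworkPlugin implements NetworkPlugin {
    @Override
    public Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
                                                          PageCacheRecycler pageCacheRecycler,
                                                          CircuitBreakerService circuitBreakerService,
                                                          NamedWriteableRegistry namedWriteableRegistry,
                                                          NetworkService networkService) {
        // The recycler is handed to the transport so it can draw reusable pages for network buffers.
        return Collections.singletonMap("my-transport",
                () -> createMyTransport(settings, threadPool, pageCacheRecycler, bigArrays, circuitBreakerService));
    }

    private Transport createMyTransport(Settings settings, ThreadPool threadPool, PageCacheRecycler recycler,
                                        BigArrays bigArrays, CircuitBreakerService breakerService) {
        throw new UnsupportedOperationException("illustrative placeholder");
    }
}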
@@ -264,17 +264,41 @@ public synchronized <T> void addSettingsUpdateConsumer(Setting<T> setting, Consu
}

/**
* Validates that all given settings are registered and valid
* @param settings the settings to validate
* @param validateDependencies if <code>true</code> settings dependencies are validated as well.
* Validates that all settings are registered and valid.
*
* @param settings the settings to validate
* @param validateDependencies true if dependent settings should be validated
* @see Setting#getSettingsDependencies(String)
*/
public final void validate(Settings settings, boolean validateDependencies) {
List<RuntimeException> exceptions = new ArrayList<>();
for (String key : settings.keySet()) { // settings iterate in deterministic fashion
public final void validate(final Settings settings, final boolean validateDependencies) {
validate(settings, validateDependencies, false, false);
}

/**
* Validates that all settings are registered and valid.
*
* @param settings the settings
* @param validateDependencies true if dependent settings should be validated
* @param ignorePrivateSettings true if private settings should be ignored during validation
* @param ignoreArchivedSettings true if archived settings should be ignored during validation
* @see Setting#getSettingsDependencies(String)
*/
public final void validate(
final Settings settings,
final boolean validateDependencies,
final boolean ignorePrivateSettings,
final boolean ignoreArchivedSettings) {
final List<RuntimeException> exceptions = new ArrayList<>();
for (final String key : settings.keySet()) { // settings iterate in deterministic fashion
if (isPrivateSetting(key) && ignorePrivateSettings) {
continue;
}
if (key.startsWith(ARCHIVED_SETTINGS_PREFIX) && ignoreArchivedSettings) {
continue;
}
try {
validate(key, settings, validateDependencies);
} catch (RuntimeException ex) {
} catch (final RuntimeException ex) {
exceptions.add(ex);
}
}
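A hedged sketch of how the new four-argument overload can be called (this is not a call site from this commit; the settings keys are illustrative and the snippet assumes an Elasticsearch core jar on the classpath): skipping archived settings lets a blob carrying `archived.*` keys still have its remaining, registered settings validated, which appears to be what the "Do not open indices with broken settings" change in this merge builds on.

import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Settings;

public class ValidateSkipArchivedDemo {
    public static void main(String[] args) {
        // A settings blob mixing a registered index setting with an archived (unknown) one.
        Settings settings = Settings.builder()
                .put("index.number_of_replicas", 1)
                .put("archived.index.some_removed_setting", "whatever")
                .build();

        // validateDependencies=true, ignorePrivateSettings=false, ignoreArchivedSettings=true:
        // the archived key is skipped instead of being reported as an unknown setting.
        IndexScopedSettings.DEFAULT_SCOPED_SETTINGS.validate(settings, true, false, true);
        System.out.println("validated; archived key ignored");
    }
}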
@@ -169,8 +169,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {

)));

public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY,
BUILT_IN_INDEX_SETTINGS);
public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY, BUILT_IN_INDEX_SETTINGS);

public IndexScopedSettings(Settings settings, Set<Setting<?>> settingsSet) {
super(settings, settingsSet, Property.IndexScope);