From 15824ad82fe25d6110930e446f84cf2bf0535456 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Wed, 29 Aug 2018 09:24:56 -0700 Subject: [PATCH 01/52] Painless: Add Bindings (#33042) Add bindings that allow some specialized methods to store permanent state between script executions. --- .../elasticsearch/painless/spi/Whitelist.java | 5 +- .../painless/spi/WhitelistBinding.java | 67 +++++ .../painless/spi/WhitelistClass.java | 5 +- .../painless/spi/WhitelistLoader.java | 161 +++++++--- .../painless/spi/WhitelistMethod.java | 3 +- .../elasticsearch/painless/BindingTest.java | 32 ++ .../org/elasticsearch/painless/Globals.java | 18 +- .../painless/lookup/PainlessBinding.java | 41 +++ .../painless/lookup/PainlessClass.java | 1 + .../painless/lookup/PainlessClassBuilder.java | 1 + .../painless/lookup/PainlessConstructor.java | 1 + .../painless/lookup/PainlessField.java | 1 + .../painless/lookup/PainlessLookup.java | 15 +- .../lookup/PainlessLookupBuilder.java | 282 ++++++++++++++++-- .../painless/lookup/PainlessMethod.java | 1 + .../painless/node/ECallLocal.java | 58 +++- .../elasticsearch/painless/node/SSource.java | 7 + .../painless/spi/org.elasticsearch.txt | 40 +-- .../elasticsearch/painless/BindingsTests.java | 64 ++++ 19 files changed, 712 insertions(+), 91 deletions(-) create mode 100644 modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistBinding.java create mode 100644 modules/lang-painless/src/main/java/org/elasticsearch/painless/BindingTest.java create mode 100644 modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessBinding.java create mode 100644 modules/lang-painless/src/test/java/org/elasticsearch/painless/BindingsTests.java diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java index c38325edd1424..7acbff6cb0b93 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java @@ -61,9 +61,12 @@ public final class Whitelist { /** The {@link List} of all the whitelisted Painless classes. */ public final List whitelistClasses; + public final List whitelistBindings; + /** Standard constructor. All values must be not {@code null}. */ - public Whitelist(ClassLoader classLoader, List whitelistClasses) { + public Whitelist(ClassLoader classLoader, List whitelistClasses, List whitelistBindings) { this.classLoader = Objects.requireNonNull(classLoader); this.whitelistClasses = Collections.unmodifiableList(Objects.requireNonNull(whitelistClasses)); + this.whitelistBindings = Collections.unmodifiableList(Objects.requireNonNull(whitelistBindings)); } } diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistBinding.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistBinding.java new file mode 100644 index 0000000000000..364dbbb09ca9b --- /dev/null +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistBinding.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless.spi; + +import java.util.List; +import java.util.Objects; + +/** + * A binding represents a method call that stores state. Each binding class must have exactly one + * public constructor and one public method excluding those inherited directly from {@link Object}. + * The canonical type name parameters provided must match those of the constructor and method combined. + * The constructor for a binding class will be called when the binding method is called for the first + * time at which point state may be stored for the arguments passed into the constructor. The method + * for a binding class will be called each time the binding method is called and may use the previously + * stored state. + */ +public class WhitelistBinding { + + /** Information about where this constructor was whitelisted from. */ + public final String origin; + + /** The Java class name this binding represents. */ + public final String targetJavaClassName; + + /** The method name for this binding. */ + public final String methodName; + + /** + * The canonical type name for the return type. + */ + public final String returnCanonicalTypeName; + + /** + * A {@link List} of {@link String}s that are the Painless type names for the parameters of the + * constructor which can be used to look up the Java constructor through reflection. + */ + public final List canonicalTypeNameParameters; + + /** Standard constructor. All values must be not {@code null}. */ + public WhitelistBinding(String origin, String targetJavaClassName, + String methodName, String returnCanonicalTypeName, List canonicalTypeNameParameters) { + + this.origin = Objects.requireNonNull(origin); + this.targetJavaClassName = Objects.requireNonNull(targetJavaClassName); + + this.methodName = Objects.requireNonNull(methodName); + this.returnCanonicalTypeName = Objects.requireNonNull(returnCanonicalTypeName); + this.canonicalTypeNameParameters = Objects.requireNonNull(canonicalTypeNameParameters); + } +} diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java index 0b216ae5c2953..7b3eb75aa3ecd 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java @@ -62,9 +62,8 @@ public final class WhitelistClass { /** Standard constructor. All values must be not {@code null}. 
*/ public WhitelistClass(String origin, String javaClassName, boolean noImport, - List whitelistConstructors, - List whitelistMethods, - List whitelistFields) { + List whitelistConstructors, List whitelistMethods, List whitelistFields) + { this.origin = Objects.requireNonNull(origin); this.javaClassName = Objects.requireNonNull(javaClassName); diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java index a4a0076626a9c..0279c82f1b67b 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java @@ -133,6 +133,7 @@ public final class WhitelistLoader { */ public static Whitelist loadFromResourceFiles(Class resource, String... filepaths) { List whitelistClasses = new ArrayList<>(); + List whitelistBindings = new ArrayList<>(); // Execute a single pass through the whitelist text files. This will gather all the // constructors, methods, augmented methods, and fields for each whitelisted class. @@ -141,8 +142,9 @@ public static Whitelist loadFromResourceFiles(Class resource, String... filep int number = -1; try (LineNumberReader reader = new LineNumberReader( - new InputStreamReader(resource.getResourceAsStream(filepath), StandardCharsets.UTF_8))) { + new InputStreamReader(resource.getResourceAsStream(filepath), StandardCharsets.UTF_8))) { + String parseType = null; String whitelistClassOrigin = null; String javaClassName = null; boolean noImport = false; @@ -165,7 +167,11 @@ public static Whitelist loadFromResourceFiles(Class resource, String... filep // Ensure the final token of the line is '{'. if (line.endsWith("{") == false) { throw new IllegalArgumentException( - "invalid class definition: failed to parse class opening bracket [" + line + "]"); + "invalid class definition: failed to parse class opening bracket [" + line + "]"); + } + + if (parseType != null) { + throw new IllegalArgumentException("invalid definition: cannot embed class definition [" + line + "]"); } // Parse the Java class name. @@ -178,6 +184,7 @@ public static Whitelist loadFromResourceFiles(Class resource, String... filep throw new IllegalArgumentException("invalid class definition: failed to parse class name [" + line + "]"); } + parseType = "class"; whitelistClassOrigin = "[" + filepath + "]:[" + number + "]"; javaClassName = tokens[0]; @@ -185,34 +192,117 @@ public static Whitelist loadFromResourceFiles(Class resource, String... filep whitelistConstructors = new ArrayList<>(); whitelistMethods = new ArrayList<>(); whitelistFields = new ArrayList<>(); + } else if (line.startsWith("static ")) { + // Ensure the final token of the line is '{'. + if (line.endsWith("{") == false) { + throw new IllegalArgumentException( + "invalid static definition: failed to parse static opening bracket [" + line + "]"); + } - // Handle the end of a class, by creating a new WhitelistClass with all the previously gathered - // constructors, methods, augmented methods, and fields, and adding it to the list of whitelisted classes. + if (parseType != null) { + throw new IllegalArgumentException("invalid definition: cannot embed static definition [" + line + "]"); + } + + parseType = "static"; + + // Handle the end of a definition and reset all previously gathered values. 
// Expects the following format: '}' '\n' } else if (line.equals("}")) { - if (javaClassName == null) { - throw new IllegalArgumentException("invalid class definition: extraneous closing bracket"); + if (parseType == null) { + throw new IllegalArgumentException("invalid definition: extraneous closing bracket"); } - whitelistClasses.add(new WhitelistClass(whitelistClassOrigin, javaClassName, noImport, - whitelistConstructors, whitelistMethods, whitelistFields)); + // Create a new WhitelistClass with all the previously gathered constructors, methods, + // augmented methods, and fields, and add it to the list of whitelisted classes. + if ("class".equals(parseType)) { + whitelistClasses.add(new WhitelistClass(whitelistClassOrigin, javaClassName, noImport, + whitelistConstructors, whitelistMethods, whitelistFields)); + + whitelistClassOrigin = null; + javaClassName = null; + noImport = false; + whitelistConstructors = null; + whitelistMethods = null; + whitelistFields = null; + } - // Set all the variables to null to ensure a new class definition is found before other parsable values. - whitelistClassOrigin = null; - javaClassName = null; - noImport = false; - whitelistConstructors = null; - whitelistMethods = null; - whitelistFields = null; + // Reset the parseType. + parseType = null; - // Handle all other valid cases. - } else { + // Handle static definition types. + // Expects the following format: ID ID '(' ( ID ( ',' ID )* )? ')' 'bound_to' ID '\n' + } else if ("static".equals(parseType)) { + // Mark the origin of this parsable object. + String origin = "[" + filepath + "]:[" + number + "]"; + + // Parse the tokens prior to the method parameters. + int parameterStartIndex = line.indexOf('('); + + if (parameterStartIndex == -1) { + throw new IllegalArgumentException( + "illegal static definition: start of method parameters not found [" + line + "]"); + } + + String[] tokens = line.substring(0, parameterStartIndex).trim().split("\\s+"); + + String methodName; + + // Based on the number of tokens, look up the Java method name. + if (tokens.length == 2) { + methodName = tokens[1]; + } else { + throw new IllegalArgumentException("invalid method definition: unexpected format [" + line + "]"); + } + + String returnCanonicalTypeName = tokens[0]; + + // Parse the method parameters. + int parameterEndIndex = line.indexOf(')'); + + if (parameterEndIndex == -1) { + throw new IllegalArgumentException( + "illegal static definition: end of method parameters not found [" + line + "]"); + } + + String[] canonicalTypeNameParameters = + line.substring(parameterStartIndex + 1, parameterEndIndex).replaceAll("\\s+", "").split(","); + + // Handle the case for a method with no parameters. + if ("".equals(canonicalTypeNameParameters[0])) { + canonicalTypeNameParameters = new String[0]; + } + + // Parse the static type and class. + tokens = line.substring(parameterEndIndex + 1).trim().split("\\s+"); + + String staticType; + String targetJavaClassName; + + // Based on the number of tokens, look up the type and class. + if (tokens.length == 2) { + staticType = tokens[0]; + targetJavaClassName = tokens[1]; + } else { + throw new IllegalArgumentException("invalid static definition: unexpected format [" + line + "]"); + } + + // Check the static type is valid. 
+ if ("bound_to".equals(staticType) == false) { + throw new IllegalArgumentException( + "invalid static definition: unexpected static type [" + staticType + "] [" + line + "]"); + } + + whitelistBindings.add(new WhitelistBinding(origin, targetJavaClassName, + methodName, returnCanonicalTypeName, Arrays.asList(canonicalTypeNameParameters))); + + // Handle class definition types. + } else if ("class".equals(parseType)) { // Mark the origin of this parsable object. String origin = "[" + filepath + "]:[" + number + "]"; // Ensure we have a defined class before adding any constructors, methods, augmented methods, or fields. - if (javaClassName == null) { - throw new IllegalArgumentException("invalid object definition: expected a class name [" + line + "]"); + if (parseType == null) { + throw new IllegalArgumentException("invalid definition: expected one of ['class', 'static'] [" + line + "]"); } // Handle the case for a constructor definition. @@ -221,7 +311,7 @@ public static Whitelist loadFromResourceFiles(Class resource, String... filep // Ensure the final token of the line is ')'. if (line.endsWith(")") == false) { throw new IllegalArgumentException( - "invalid constructor definition: expected a closing parenthesis [" + line + "]"); + "invalid constructor definition: expected a closing parenthesis [" + line + "]"); } // Parse the constructor parameters. @@ -234,34 +324,34 @@ public static Whitelist loadFromResourceFiles(Class resource, String... filep whitelistConstructors.add(new WhitelistConstructor(origin, Arrays.asList(tokens))); - // Handle the case for a method or augmented method definition. - // Expects the following format: ID ID? ID '(' ( ID ( ',' ID )* )? ')' '\n' + // Handle the case for a method or augmented method definition. + // Expects the following format: ID ID? ID '(' ( ID ( ',' ID )* )? ')' '\n' } else if (line.contains("(")) { // Ensure the final token of the line is ')'. if (line.endsWith(")") == false) { throw new IllegalArgumentException( - "invalid method definition: expected a closing parenthesis [" + line + "]"); + "invalid method definition: expected a closing parenthesis [" + line + "]"); } // Parse the tokens prior to the method parameters. int parameterIndex = line.indexOf('('); - String[] tokens = line.trim().substring(0, parameterIndex).split("\\s+"); + String[] tokens = line.substring(0, parameterIndex).trim().split("\\s+"); - String javaMethodName; + String methodName; String javaAugmentedClassName; // Based on the number of tokens, look up the Java method name and if provided the Java augmented class. if (tokens.length == 2) { - javaMethodName = tokens[1]; + methodName = tokens[1]; javaAugmentedClassName = null; } else if (tokens.length == 3) { - javaMethodName = tokens[2]; + methodName = tokens[2]; javaAugmentedClassName = tokens[1]; } else { throw new IllegalArgumentException("invalid method definition: unexpected format [" + line + "]"); } - String painlessReturnTypeName = tokens[0]; + String returnCanonicalTypeName = tokens[0]; // Parse the method parameters. tokens = line.substring(parameterIndex + 1, line.length() - 1).replaceAll("\\s+", "").split(","); @@ -271,11 +361,11 @@ public static Whitelist loadFromResourceFiles(Class resource, String... 
filep tokens = new String[0]; } - whitelistMethods.add(new WhitelistMethod(origin, javaAugmentedClassName, javaMethodName, - painlessReturnTypeName, Arrays.asList(tokens))); + whitelistMethods.add(new WhitelistMethod(origin, javaAugmentedClassName, methodName, + returnCanonicalTypeName, Arrays.asList(tokens))); - // Handle the case for a field definition. - // Expects the following format: ID ID '\n' + // Handle the case for a field definition. + // Expects the following format: ID ID '\n' } else { // Parse the field tokens. String[] tokens = line.split("\\s+"); @@ -287,20 +377,23 @@ public static Whitelist loadFromResourceFiles(Class resource, String... filep whitelistFields.add(new WhitelistField(origin, tokens[1], tokens[0])); } + } else { + throw new IllegalArgumentException("invalid definition: unable to parse line [" + line + "]"); } } // Ensure all classes end with a '}' token before the end of the file. if (javaClassName != null) { - throw new IllegalArgumentException("invalid class definition: expected closing bracket"); + throw new IllegalArgumentException("invalid definition: expected closing bracket"); } } catch (Exception exception) { throw new RuntimeException("error in [" + filepath + "] at line [" + number + "]", exception); } } + ClassLoader loader = AccessController.doPrivileged((PrivilegedAction)resource::getClassLoader); - return new Whitelist(loader, whitelistClasses); + return new Whitelist(loader, whitelistClasses, whitelistBindings); } private WhitelistLoader() {} diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java index 5cd023a3591ad..f450ee0238d19 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java @@ -67,7 +67,8 @@ public class WhitelistMethod { * is augmented as described in the class documentation. */ public WhitelistMethod(String origin, String augmentedCanonicalClassName, String methodName, - String returnCanonicalTypeName, List canonicalTypeNameParameters) { + String returnCanonicalTypeName, List canonicalTypeNameParameters) { + this.origin = Objects.requireNonNull(origin); this.augmentedCanonicalClassName = augmentedCanonicalClassName; this.methodName = methodName; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/BindingTest.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/BindingTest.java new file mode 100644 index 0000000000000..1dcbce037b264 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/BindingTest.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless; + +public class BindingTest { + public int state; + + public BindingTest(int state0, int state1) { + this.state = state0 + state1; + } + + public int testAddWithState(int stateless) { + return stateless + state; + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Globals.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Globals.java index 83eb74d827f88..d18cf2780cf3c 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Globals.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Globals.java @@ -31,6 +31,7 @@ public class Globals { private final Map syntheticMethods = new HashMap<>(); private final Map constantInitializers = new HashMap<>(); + private final Map> bindings = new HashMap<>(); private final BitSet statements; /** Create a new Globals from the set of statement boundaries */ @@ -54,7 +55,15 @@ public void addConstantInitializer(Constant constant) { throw new IllegalStateException("constant initializer: " + constant.name + " already exists"); } } - + + /** Adds a new binding to be written as a local variable */ + public String addBinding(Class type) { + String name = "$binding$" + bindings.size(); + bindings.put(name, type); + + return name; + } + /** Returns the current synthetic methods */ public Map getSyntheticMethods() { return syntheticMethods; @@ -64,7 +73,12 @@ public Map getSyntheticMethods() { public Map getConstantInitializers() { return constantInitializers; } - + + /** Returns the current bindings */ + public Map> getBindings() { + return bindings; + } + /** Returns the set of statement boundaries */ public BitSet getStatements() { return statements; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessBinding.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessBinding.java new file mode 100644 index 0000000000000..41178dd5d7506 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessBinding.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.painless.lookup; + +import java.lang.reflect.Constructor; +import java.lang.reflect.Method; +import java.util.List; + +public class PainlessBinding { + + public final Constructor javaConstructor; + public final Method javaMethod; + + public final Class returnType; + public final List> typeParameters; + + PainlessBinding(Constructor javaConstructor, Method javaMethod, Class returnType, List> typeParameters) { + this.javaConstructor = javaConstructor; + this.javaMethod = javaMethod; + + this.returnType = returnType; + this.typeParameters = typeParameters; + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java index 50bb79dcfbdf5..f5d6c97bb2f3e 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java @@ -24,6 +24,7 @@ import java.util.Map; public final class PainlessClass { + public final Map constructors; public final Map staticMethods; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClassBuilder.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClassBuilder.java index a61215e9ed749..92100d1bda0c0 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClassBuilder.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClassBuilder.java @@ -24,6 +24,7 @@ import java.util.Map; final class PainlessClassBuilder { + final Map constructors; final Map staticMethods; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessConstructor.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessConstructor.java index 76597c1a29d65..a3dc6c8122bd6 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessConstructor.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessConstructor.java @@ -25,6 +25,7 @@ import java.util.List; public class PainlessConstructor { + public final Constructor javaConstructor; public final List> typeParameters; public final MethodHandle methodHandle; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessField.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessField.java index a55d6c3730ebd..9567e97331c7a 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessField.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessField.java @@ -23,6 +23,7 @@ import java.lang.reflect.Field; public final class PainlessField { + public final Field javaField; public final Class typeParameter; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java index 55855a3cb1efb..2d6ed3e361dc3 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java @@ -37,12 +37,17 @@ public final class PainlessLookup { private final Map> canonicalClassNamesToClasses; private final Map, PainlessClass> 
classesToPainlessClasses; - PainlessLookup(Map> canonicalClassNamesToClasses, Map, PainlessClass> classesToPainlessClasses) { + private final Map painlessMethodKeysToPainlessBindings; + + PainlessLookup(Map> canonicalClassNamesToClasses, Map, PainlessClass> classesToPainlessClasses, + Map painlessMethodKeysToPainlessBindings) { Objects.requireNonNull(canonicalClassNamesToClasses); Objects.requireNonNull(classesToPainlessClasses); this.canonicalClassNamesToClasses = Collections.unmodifiableMap(canonicalClassNamesToClasses); this.classesToPainlessClasses = Collections.unmodifiableMap(classesToPainlessClasses); + + this.painlessMethodKeysToPainlessBindings = Collections.unmodifiableMap(painlessMethodKeysToPainlessBindings); } public boolean isValidCanonicalClassName(String canonicalClassName) { @@ -162,6 +167,14 @@ public PainlessField lookupPainlessField(Class targetClass, boolean isStatic, return painlessField; } + public PainlessBinding lookupPainlessBinding(String methodName, int arity) { + Objects.requireNonNull(methodName); + + String painlessMethodKey = buildPainlessMethodKey(methodName, arity); + + return painlessMethodKeysToPainlessBindings.get(painlessMethodKey); + } + public PainlessMethod lookupFunctionalInterfacePainlessMethod(Class targetClass) { PainlessClass targetPainlessClass = classesToPainlessClasses.get(targetClass); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java index c8353b54c9f44..7adc816252059 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java @@ -20,6 +20,7 @@ package org.elasticsearch.painless.lookup; import org.elasticsearch.painless.spi.Whitelist; +import org.elasticsearch.painless.spi.WhitelistBinding; import org.elasticsearch.painless.spi.WhitelistClass; import org.elasticsearch.painless.spi.WhitelistConstructor; import org.elasticsearch.painless.spi.WhitelistField; @@ -52,11 +53,11 @@ public final class PainlessLookupBuilder { private static class PainlessConstructorCacheKey { - private final Class targetType; + private final Class targetClass; private final List> typeParameters; - private PainlessConstructorCacheKey(Class targetType, List> typeParameters) { - this.targetType = targetType; + private PainlessConstructorCacheKey(Class targetClass, List> typeParameters) { + this.targetClass = targetClass; this.typeParameters = Collections.unmodifiableList(typeParameters); } @@ -72,25 +73,27 @@ public boolean equals(Object object) { PainlessConstructorCacheKey that = (PainlessConstructorCacheKey)object; - return Objects.equals(targetType, that.targetType) && + return Objects.equals(targetClass, that.targetClass) && Objects.equals(typeParameters, that.typeParameters); } @Override public int hashCode() { - return Objects.hash(targetType, typeParameters); + return Objects.hash(targetClass, typeParameters); } } private static class PainlessMethodCacheKey { - private final Class targetType; + private final Class targetClass; private final String methodName; + private final Class returnType; private final List> typeParameters; - private PainlessMethodCacheKey(Class targetType, String methodName, List> typeParameters) { - this.targetType = targetType; + private PainlessMethodCacheKey(Class targetClass, String methodName, Class returnType, List> 
typeParameters) { + this.targetClass = targetClass; this.methodName = methodName; + this.returnType = returnType; this.typeParameters = Collections.unmodifiableList(typeParameters); } @@ -106,25 +109,26 @@ public boolean equals(Object object) { PainlessMethodCacheKey that = (PainlessMethodCacheKey)object; - return Objects.equals(targetType, that.targetType) && + return Objects.equals(targetClass, that.targetClass) && Objects.equals(methodName, that.methodName) && + Objects.equals(returnType, that.returnType) && Objects.equals(typeParameters, that.typeParameters); } @Override public int hashCode() { - return Objects.hash(targetType, methodName, typeParameters); + return Objects.hash(targetClass, methodName, returnType, typeParameters); } } private static class PainlessFieldCacheKey { - private final Class targetType; + private final Class targetClass; private final String fieldName; private final Class typeParameter; - private PainlessFieldCacheKey(Class targetType, String fieldName, Class typeParameter) { - this.targetType = targetType; + private PainlessFieldCacheKey(Class targetClass, String fieldName, Class typeParameter) { + this.targetClass = targetClass; this.fieldName = fieldName; this.typeParameter = typeParameter; } @@ -141,20 +145,61 @@ public boolean equals(Object object) { PainlessFieldCacheKey that = (PainlessFieldCacheKey) object; - return Objects.equals(targetType, that.targetType) && + return Objects.equals(targetClass, that.targetClass) && Objects.equals(fieldName, that.fieldName) && Objects.equals(typeParameter, that.typeParameter); } @Override public int hashCode() { - return Objects.hash(targetType, fieldName, typeParameter); + return Objects.hash(targetClass, fieldName, typeParameter); } } - private static final Map painlessConstuctorCache = new HashMap<>(); - private static final Map painlessMethodCache = new HashMap<>(); - private static final Map painlessFieldCache = new HashMap<>(); + private static class PainlessBindingCacheKey { + + private final Class targetClass; + private final String methodName; + private final Class methodReturnType; + private final List> methodTypeParameters; + + private PainlessBindingCacheKey(Class targetClass, + String methodName, Class returnType, List> typeParameters) { + + this.targetClass = targetClass; + this.methodName = methodName; + this.methodReturnType = returnType; + this.methodTypeParameters = Collections.unmodifiableList(typeParameters); + } + + @Override + public boolean equals(Object object) { + if (this == object) { + return true; + } + + if (object == null || getClass() != object.getClass()) { + return false; + } + + PainlessBindingCacheKey that = (PainlessBindingCacheKey)object; + + return Objects.equals(targetClass, that.targetClass) && + Objects.equals(methodName, that.methodName) && + Objects.equals(methodReturnType, that.methodReturnType) && + Objects.equals(methodTypeParameters, that.methodTypeParameters); + } + + @Override + public int hashCode() { + return Objects.hash(targetClass, methodName, methodReturnType, methodTypeParameters); + } + } + + private static final Map painlessConstructorCache = new HashMap<>(); + private static final Map painlessMethodCache = new HashMap<>(); + private static final Map painlessFieldCache = new HashMap<>(); + private static final Map painlessBindingCache = new HashMap<>(); private static final Pattern CLASS_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][._a-zA-Z0-9]*$"); private static final Pattern METHOD_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][_a-zA-Z0-9]*$"); @@ -197,6 +242,14 
@@ public static PainlessLookup buildFromWhitelists(List whitelists) { targetCanonicalClassName, whitelistField.fieldName, whitelistField.canonicalTypeNameParameter); } } + + for (WhitelistBinding whitelistBinding : whitelist.whitelistBindings) { + origin = whitelistBinding.origin; + painlessLookupBuilder.addPainlessBinding( + whitelist.classLoader, whitelistBinding.targetJavaClassName, + whitelistBinding.methodName, whitelistBinding.returnCanonicalTypeName, + whitelistBinding.canonicalTypeNameParameters); + } } } catch (Exception exception) { throw new IllegalArgumentException("error loading whitelist(s) " + origin, exception); @@ -208,9 +261,13 @@ public static PainlessLookup buildFromWhitelists(List whitelists) { private final Map> canonicalClassNamesToClasses; private final Map, PainlessClassBuilder> classesToPainlessClassBuilders; + private final Map painlessMethodKeysToPainlessBindings; + public PainlessLookupBuilder() { canonicalClassNamesToClasses = new HashMap<>(); classesToPainlessClassBuilders = new HashMap<>(); + + painlessMethodKeysToPainlessBindings = new HashMap<>(); } private Class canonicalTypeNameToType(String canonicalTypeName) { @@ -392,7 +449,7 @@ public void addPainlessConstructor(Class targetClass, List> typePara MethodType methodType = methodHandle.type(); - painlessConstructor = painlessConstuctorCache.computeIfAbsent( + painlessConstructor = painlessConstructorCache.computeIfAbsent( new PainlessConstructorCacheKey(targetClass, typeParameters), key -> new PainlessConstructor(javaConstructor, typeParameters, methodHandle, methodType) ); @@ -439,7 +496,7 @@ public void addPainlessMethod(ClassLoader classLoader, String targetCanonicalCla Class typeParameter = canonicalTypeNameToType(canonicalTypeNameParameter); if (typeParameter == null) { - throw new IllegalArgumentException("parameter type [" + canonicalTypeNameParameter + "] not found for method " + + throw new IllegalArgumentException("type parameter [" + canonicalTypeNameParameter + "] not found for method " + "[[" + targetCanonicalClassName + "], [" + methodName + "], " + canonicalTypeNameParameters + "]"); } @@ -449,7 +506,7 @@ public void addPainlessMethod(ClassLoader classLoader, String targetCanonicalCla Class returnType = canonicalTypeNameToType(returnCanonicalTypeName); if (returnType == null) { - throw new IllegalArgumentException("parameter type [" + returnCanonicalTypeName + "] not found for method " + + throw new IllegalArgumentException("return type [" + returnCanonicalTypeName + "] not found for method " + "[[" + targetCanonicalClassName + "], [" + methodName + "], " + canonicalTypeNameParameters + "]"); } @@ -548,7 +605,7 @@ public void addPainlessMethod(Class targetClass, Class augmentedClass, Str MethodType methodType = methodHandle.type(); painlessMethod = painlessMethodCache.computeIfAbsent( - new PainlessMethodCacheKey(targetClass, methodName, typeParameters), + new PainlessMethodCacheKey(targetClass, methodName, returnType, typeParameters), key -> new PainlessMethod(javaMethod, targetClass, returnType, typeParameters, methodHandle, methodType)); painlessClassBuilder.staticMethods.put(painlessMethodKey, painlessMethod); @@ -588,7 +645,7 @@ public void addPainlessMethod(Class targetClass, Class augmentedClass, Str MethodType methodType = methodHandle.type(); painlessMethod = painlessMethodCache.computeIfAbsent( - new PainlessMethodCacheKey(targetClass, methodName, typeParameters), + new PainlessMethodCacheKey(targetClass, methodName, returnType, typeParameters), key -> new 
PainlessMethod(javaMethod, targetClass, returnType, typeParameters, methodHandle, methodType)); painlessClassBuilder.methods.put(painlessMethodKey, painlessMethod); @@ -731,6 +788,183 @@ public void addPainlessField(Class targetClass, String fieldName, Class ty } } + public void addPainlessBinding(ClassLoader classLoader, String targetJavaClassName, + String methodName, String returnCanonicalTypeName, List canonicalTypeNameParameters) { + + Objects.requireNonNull(classLoader); + Objects.requireNonNull(targetJavaClassName); + Objects.requireNonNull(methodName); + Objects.requireNonNull(returnCanonicalTypeName); + Objects.requireNonNull(canonicalTypeNameParameters); + + Class targetClass; + + try { + targetClass = Class.forName(targetJavaClassName, true, classLoader); + } catch (ClassNotFoundException cnfe) { + throw new IllegalArgumentException("class [" + targetJavaClassName + "] not found", cnfe); + } + + String targetCanonicalClassName = typeToCanonicalTypeName(targetClass); + List> typeParameters = new ArrayList<>(canonicalTypeNameParameters.size()); + + for (String canonicalTypeNameParameter : canonicalTypeNameParameters) { + Class typeParameter = canonicalTypeNameToType(canonicalTypeNameParameter); + + if (typeParameter == null) { + throw new IllegalArgumentException("type parameter [" + canonicalTypeNameParameter + "] not found for binding " + + "[[" + targetCanonicalClassName + "], [" + methodName + "], " + canonicalTypeNameParameters + "]"); + } + + typeParameters.add(typeParameter); + } + + Class returnType = canonicalTypeNameToType(returnCanonicalTypeName); + + if (returnType == null) { + throw new IllegalArgumentException("return type [" + returnCanonicalTypeName + "] not found for binding " + + "[[" + targetCanonicalClassName + "], [" + methodName + "], " + canonicalTypeNameParameters + "]"); + } + + addPainlessBinding(targetClass, methodName, returnType, typeParameters); + } + + public void addPainlessBinding(Class targetClass, String methodName, Class returnType, List> typeParameters) { + + Objects.requireNonNull(targetClass); + Objects.requireNonNull(methodName); + Objects.requireNonNull(returnType); + Objects.requireNonNull(typeParameters); + + if (targetClass == def.class) { + throw new IllegalArgumentException("cannot add binding as reserved class [" + DEF_CLASS_NAME + "]"); + } + + String targetCanonicalClassName = typeToCanonicalTypeName(targetClass); + + Constructor[] javaConstructors = targetClass.getConstructors(); + Constructor javaConstructor = null; + + for (Constructor eachJavaConstructor : javaConstructors) { + if (eachJavaConstructor.getDeclaringClass() == targetClass) { + if (javaConstructor != null) { + throw new IllegalArgumentException("binding [" + targetCanonicalClassName + "] cannot have multiple constructors"); + } + + javaConstructor = eachJavaConstructor; + } + } + + if (javaConstructor == null) { + throw new IllegalArgumentException("binding [" + targetCanonicalClassName + "] must have exactly one constructor"); + } + + int constructorTypeParametersSize = javaConstructor.getParameterCount(); + + for (int typeParameterIndex = 0; typeParameterIndex < constructorTypeParametersSize; ++typeParameterIndex) { + Class typeParameter = typeParameters.get(typeParameterIndex); + + if (isValidType(typeParameter) == false) { + throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] not found " + + "for binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]"); + } + + Class 
javaTypeParameter = javaConstructor.getParameterTypes()[typeParameterIndex]; + + if (isValidType(javaTypeParameter) == false) { + throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] not found " + + "for binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]"); + } + + if (javaTypeParameter != typeToJavaType(typeParameter)) { + throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(javaTypeParameter) + "] " + + "does not match the specified type parameter [" + typeToCanonicalTypeName(typeParameter) + "] " + + "for binding [[" + targetClass.getCanonicalName() + "], " + typesToCanonicalTypeNames(typeParameters) + "]"); + } + } + + if (METHOD_NAME_PATTERN.matcher(methodName).matches() == false) { + throw new IllegalArgumentException( + "invalid method name [" + methodName + "] for binding [" + targetCanonicalClassName + "]."); + } + + Method[] javaMethods = targetClass.getMethods(); + Method javaMethod = null; + + for (Method eachJavaMethod : javaMethods) { + if (eachJavaMethod.getDeclaringClass() == targetClass) { + if (javaMethod != null) { + throw new IllegalArgumentException("binding [" + targetCanonicalClassName + "] cannot have multiple methods"); + } + + javaMethod = eachJavaMethod; + } + } + + if (javaMethod == null) { + throw new IllegalArgumentException("binding [" + targetCanonicalClassName + "] must have exactly one method"); + } + + int methodTypeParametersSize = javaMethod.getParameterCount(); + + for (int typeParameterIndex = 0; typeParameterIndex < methodTypeParametersSize; ++typeParameterIndex) { + Class typeParameter = typeParameters.get(typeParameterIndex); + + if (isValidType(typeParameter) == false) { + throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] not found " + + "for binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]"); + } + + Class javaTypeParameter = javaMethod.getParameterTypes()[typeParameterIndex]; + + if (isValidType(javaTypeParameter) == false) { + throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] not found " + + "for binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]"); + } + + if (javaTypeParameter != typeToJavaType(typeParameter)) { + throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(javaTypeParameter) + "] " + + "does not match the specified type parameter [" + typeToCanonicalTypeName(typeParameter) + "] " + + "for binding [[" + targetClass.getCanonicalName() + "], " + typesToCanonicalTypeNames(typeParameters) + "]"); + } + } + + if (javaMethod.getReturnType() != typeToJavaType(returnType)) { + throw new IllegalArgumentException("return type [" + typeToCanonicalTypeName(javaMethod.getReturnType()) + "] " + + "does not match the specified returned type [" + typeToCanonicalTypeName(returnType) + "] " + + "for binding [[" + targetClass.getCanonicalName() + "], [" + methodName + "], " + + typesToCanonicalTypeNames(typeParameters) + "]"); + } + + String painlessMethodKey = buildPainlessMethodKey(methodName, constructorTypeParametersSize + methodTypeParametersSize); + PainlessBinding painlessBinding = painlessMethodKeysToPainlessBindings.get(painlessMethodKey); + + if (painlessBinding == null) { + Constructor finalJavaConstructor = javaConstructor; + Method finalJavaMethod = javaMethod; + + painlessBinding = 
painlessBindingCache.computeIfAbsent( + new PainlessBindingCacheKey(targetClass, methodName, returnType, typeParameters), + key -> new PainlessBinding(finalJavaConstructor, finalJavaMethod, returnType, typeParameters)); + + painlessMethodKeysToPainlessBindings.put(painlessMethodKey, painlessBinding); + } else if (painlessBinding.javaConstructor.equals(javaConstructor) == false || + painlessBinding.javaMethod.equals(javaMethod) == false || + painlessBinding.returnType != returnType || + painlessBinding.typeParameters.equals(typeParameters) == false) { + throw new IllegalArgumentException("cannot have bindings " + + "[[" + targetCanonicalClassName + "], " + + "[" + methodName + "], " + + "[" + typeToCanonicalTypeName(returnType) + "], " + + typesToCanonicalTypeNames(typeParameters) + "] and " + + "[[" + targetCanonicalClassName + "], " + + "[" + methodName + "], " + + "[" + typeToCanonicalTypeName(painlessBinding.returnType) + "], " + + typesToCanonicalTypeNames(painlessBinding.typeParameters) + "] and " + + "with the same name and arity but different constructors or methods"); + } + } + public PainlessLookup build() { copyPainlessClassMembers(); cacheRuntimeHandles(); @@ -742,7 +976,7 @@ public PainlessLookup build() { classesToPainlessClasses.put(painlessClassBuilderEntry.getKey(), painlessClassBuilderEntry.getValue().build()); } - return new PainlessLookup(canonicalClassNamesToClasses, classesToPainlessClasses); + return new PainlessLookup(canonicalClassNamesToClasses, classesToPainlessClasses, painlessMethodKeysToPainlessBindings); } private void copyPainlessClassMembers() { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessMethod.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessMethod.java index 9dd143a402865..89462170ae5e8 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessMethod.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessMethod.java @@ -26,6 +26,7 @@ import java.util.List; public class PainlessMethod { + public final Method javaMethod; public final Class targetClass; public final Class returnType; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECallLocal.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECallLocal.java index 1f9973df19224..8ae6ad9723da4 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECallLocal.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECallLocal.java @@ -24,8 +24,12 @@ import org.elasticsearch.painless.Locals.LocalMethod; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; +import org.elasticsearch.painless.lookup.PainlessBinding; +import org.objectweb.asm.Label; +import org.objectweb.asm.Type; import org.objectweb.asm.commons.Method; +import java.util.ArrayList; import java.util.List; import java.util.Objects; import java.util.Set; @@ -41,6 +45,7 @@ public final class ECallLocal extends AExpression { private final List arguments; private LocalMethod method = null; + private PainlessBinding binding = null; public ECallLocal(Location location, String name, List arguments) { super(location); @@ -60,32 +65,71 @@ void extractVariables(Set variables) { void analyze(Locals locals) { method = locals.getMethod(name, arguments.size()); + if (method == null) { - throw createError(new IllegalArgumentException("Unknown call [" + name + "] with [" + 
arguments.size() + "] arguments.")); + binding = locals.getPainlessLookup().lookupPainlessBinding(name, arguments.size()); + + if (binding == null) { + throw createError(new IllegalArgumentException("Unknown call [" + name + "] with [" + arguments.size() + "] arguments.")); + } } + List> typeParameters = new ArrayList<>(method == null ? binding.typeParameters : method.typeParameters); + for (int argument = 0; argument < arguments.size(); ++argument) { AExpression expression = arguments.get(argument); - expression.expected = method.typeParameters.get(argument); + expression.expected = typeParameters.get(argument); expression.internal = true; expression.analyze(locals); arguments.set(argument, expression.cast(locals)); } statement = true; - actual = method.returnType; + actual = method == null ? binding.returnType : method.returnType; } @Override void write(MethodWriter writer, Globals globals) { writer.writeDebugInfo(location); - for (AExpression argument : arguments) { - argument.write(writer, globals); - } + if (method == null) { + String name = globals.addBinding(binding.javaConstructor.getDeclaringClass()); + Type type = Type.getType(binding.javaConstructor.getDeclaringClass()); + int javaConstructorParameterCount = binding.javaConstructor.getParameterCount(); + + Label nonNull = new Label(); - writer.invokeStatic(CLASS_TYPE, new Method(method.name, method.methodType.toMethodDescriptorString())); + writer.loadThis(); + writer.getField(CLASS_TYPE, name, type); + writer.ifNonNull(nonNull); + writer.loadThis(); + writer.newInstance(type); + writer.dup(); + + for (int argument = 0; argument < javaConstructorParameterCount; ++argument) { + arguments.get(argument).write(writer, globals); + } + + writer.invokeConstructor(type, Method.getMethod(binding.javaConstructor)); + writer.putField(CLASS_TYPE, name, type); + + writer.mark(nonNull); + writer.loadThis(); + writer.getField(CLASS_TYPE, name, type); + + for (int argument = 0; argument < binding.javaMethod.getParameterCount(); ++argument) { + arguments.get(argument + javaConstructorParameterCount).write(writer, globals); + } + + writer.invokeVirtual(type, Method.getMethod(binding.javaMethod)); + } else { + for (AExpression argument : arguments) { + argument.write(writer, globals); + } + + writer.invokeStatic(CLASS_TYPE, new Method(method.name, method.methodType.toMethodDescriptorString())); + } } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java index 8aa72707b16fe..3e7fbe3761338 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java @@ -362,6 +362,13 @@ public void write() { clinit.endMethod(); } + // Write binding variables + for (Map.Entry> binding : globals.getBindings().entrySet()) { + String name = binding.getKey(); + String descriptor = Type.getType(binding.getValue()).getDescriptor(); + visitor.visitField(Opcodes.ACC_PRIVATE, name, descriptor, null, null).visitEnd(); + } + // Write any needsVarName methods for used variables for (org.objectweb.asm.commons.Method needsMethod : scriptClassInfo.getNeedsMethods()) { String name = needsMethod.getName(); diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt index 
b3d9040cc6295..b74720b2d61f2 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt @@ -136,24 +136,6 @@ class org.elasticsearch.index.mapper.IpFieldMapper$IpFieldType$IpScriptDocValues List getValues() } -# for testing. -# currently FeatureTest exposes overloaded constructor, field load store, and overloaded static methods -class org.elasticsearch.painless.FeatureTest no_import { - int z - () - (int,int) - int getX() - int getY() - void setX(int) - void setY(int) - boolean overloadedStatic() - boolean overloadedStatic(boolean) - Object twoFunctionsOfX(Function,Function) - void listInput(List) - int org.elasticsearch.painless.FeatureTestAugmentation getTotal() - int org.elasticsearch.painless.FeatureTestAugmentation addToTotal(int) -} - class org.elasticsearch.search.lookup.FieldLookup { def getValue() List getValues() @@ -178,4 +160,26 @@ class org.elasticsearch.index.similarity.ScriptedSimilarity$Term { class org.elasticsearch.index.similarity.ScriptedSimilarity$Doc { int getLength() float getFreq() +} + +# for testing +class org.elasticsearch.painless.FeatureTest no_import { + int z + () + (int,int) + int getX() + int getY() + void setX(int) + void setY(int) + boolean overloadedStatic() + boolean overloadedStatic(boolean) + Object twoFunctionsOfX(Function,Function) + void listInput(List) + int org.elasticsearch.painless.FeatureTestAugmentation getTotal() + int org.elasticsearch.painless.FeatureTestAugmentation addToTotal(int) +} + +# for testing +static { + int testAddWithState(int, int, int) bound_to org.elasticsearch.painless.BindingTest } \ No newline at end of file diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BindingsTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BindingsTests.java new file mode 100644 index 0000000000000..c6d4e1974c14b --- /dev/null +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BindingsTests.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.painless; + +import org.elasticsearch.script.ExecutableScript; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +public class BindingsTests extends ScriptTestCase { + + public void testBasicBinding() { + assertEquals(15, exec("testAddWithState(4, 5, 6)")); + } + + public void testRepeatedBinding() { + String script = "testAddWithState(4, 5, params.test)"; + Map params = new HashMap<>(); + ExecutableScript.Factory factory = scriptEngine.compile(null, script, ExecutableScript.CONTEXT, Collections.emptyMap()); + ExecutableScript executableScript = factory.newInstance(params); + + executableScript.setNextVar("test", 5); + assertEquals(14, executableScript.run()); + + executableScript.setNextVar("test", 4); + assertEquals(13, executableScript.run()); + + executableScript.setNextVar("test", 7); + assertEquals(16, executableScript.run()); + } + + public void testBoundBinding() { + String script = "testAddWithState(4, params.bound, params.test)"; + Map params = new HashMap<>(); + ExecutableScript.Factory factory = scriptEngine.compile(null, script, ExecutableScript.CONTEXT, Collections.emptyMap()); + ExecutableScript executableScript = factory.newInstance(params); + + executableScript.setNextVar("test", 5); + executableScript.setNextVar("bound", 1); + assertEquals(10, executableScript.run()); + + executableScript.setNextVar("test", 4); + executableScript.setNextVar("bound", 2); + assertEquals(9, executableScript.run()); + } +} From f34430c4e02bb24d196b0f6c471ff5a314411d57 Mon Sep 17 00:00:00 2001 From: Michael Basnight Date: Tue, 28 Aug 2018 10:33:18 -0500 Subject: [PATCH 02/52] HLRC: Use Optional in validation logic (#33104) The Validatable class comes from an old class in server code, that assumed null was returned in the event of validation having no errors. This commit changes that to use Optional, which is cleaner than passing around null objects. 
--- .../client/RestHighLevelClient.java | 13 ++++++------ .../org/elasticsearch/client/Validatable.java | 20 ++++++++----------- 2 files changed, 15 insertions(+), 18 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 5d1cc3e405dbe..3145c2c771c66 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -174,6 +174,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.ServiceLoader; import java.util.Set; import java.util.function.Function; @@ -1334,9 +1335,9 @@ protected final Resp performRequest(Req request, RequestOptions options, CheckedFunction responseConverter, Set ignores) throws IOException { - ValidationException validationException = request.validate(); - if (validationException != null && validationException.validationErrors().isEmpty() == false) { - throw validationException; + Optional validationException = request.validate(); + if (validationException != null && validationException.isPresent()) { + throw validationException.get(); } return internalPerformRequest(request, requestConverter, options, responseConverter, ignores); } @@ -1445,9 +1446,9 @@ protected final void performRequestAsync(Req req RequestOptions options, CheckedFunction responseConverter, ActionListener listener, Set ignores) { - ValidationException validationException = request.validate(); - if (validationException != null && validationException.validationErrors().isEmpty() == false) { - listener.onFailure(validationException); + Optional validationException = request.validate(); + if (validationException != null && validationException.isPresent()) { + listener.onFailure(validationException.get()); return; } internalPerformRequestAsync(request, requestConverter, options, responseConverter, listener, ignores); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Validatable.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Validatable.java index 2efff4d3663b8..fe4a1fc42cb3b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Validatable.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Validatable.java @@ -18,24 +18,20 @@ */ package org.elasticsearch.client; +import java.util.Optional; + /** * Defines a validation layer for Requests. */ public interface Validatable { - ValidationException EMPTY_VALIDATION = new ValidationException() { - @Override - public void addValidationError(String error) { - throw new UnsupportedOperationException("Validation messages should not be added to the empty validation"); - } - }; - /** - * Perform validation. This method does not have to be overridden in the event that no validation needs to be done. + * Perform validation. This method does not have to be overridden in the event that no validation needs to be done, + * or the validation was done during object construction time. A {@link ValidationException} that is not null is + * assumed to contain validation errors and will be thrown. * - * @return potentially null, in the event of older actions, an empty {@link ValidationException} in newer actions, or finally a - * {@link ValidationException} that contains a list of all failed validation. 
+ * @return An {@link Optional} {@link ValidationException} that contains a list of validation errors. */ - default ValidationException validate() { - return EMPTY_VALIDATION; + default Optional validate() { + return Optional.empty(); } } From af5ccfd14efd2dcf8e17eb3f191e78011ce3c7ba Mon Sep 17 00:00:00 2001 From: Michael Basnight Date: Tue, 28 Aug 2018 17:23:41 -0500 Subject: [PATCH 03/52] HLRC: create base timed request class (#33216) There are many requests that allow the user to set a few timeouts on. This class will allow requests impl'd in HLRC to extend from, and allow users to set those values without significant work to add them to every request. --- .../elasticsearch/client/TimedRequest.java | 56 +++++++++++++++++++ 1 file changed, 56 insertions(+) create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/TimedRequest.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/TimedRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/TimedRequest.java new file mode 100644 index 0000000000000..af8fbe3e72b37 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/TimedRequest.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client; + +import org.elasticsearch.common.unit.TimeValue; + +/** + * A base request for any requests that supply timeouts. + * + * Please note, any requests that use a ackTimeout should set timeout as they + * represent the same backing field on the server. + */ +public class TimedRequest implements Validatable { + + private TimeValue timeout; + private TimeValue masterTimeout; + + public void setTimeout(TimeValue timeout) { + this.timeout = timeout; + + } + + public void setMasterTimeout(TimeValue masterTimeout) { + this.masterTimeout = masterTimeout; + } + + /** + * Returns the request timeout + */ + public TimeValue timeout() { + return timeout; + } + + /** + * Returns the timeout for the request to be completed on the master node + */ + public TimeValue masterNodeTimeout() { + return masterTimeout; + } +} From 0769b64a47a9075e791949f0289ae559b6a7bb68 Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Wed, 29 Aug 2018 20:28:21 +0200 Subject: [PATCH 04/52] [Rollup] Re-factor Rollup Indexer into a generic indexer for re-usability (#32743) This extracts a super class out of the rollup indexer called the AsyncTwoPhaseIterator. The implementor of it can define the query, transformation of the response, indexing and the object to persist the position/state of the indexer. The stats object used by the indexer to record progress is also now abstract, allowing the implementation provide custom stats beyond what the indexer provides. 
It also allows the implementation to decide how the stats are presented (leaves toXContent() up to the implementation). This should allow new projects to reuse the search-then-index persistent task that Rollup uses, but without the restrictions/baggage of how Rollup has to work internally to satisfy time-based rollups. --- .../core/indexing/AsyncTwoPhaseIndexer.java | 385 ++++++++++++++++++ .../xpack/core/indexing/IndexerJobStats.java | 114 ++++++ .../job => indexing}/IndexerState.java | 2 +- .../xpack/core/indexing/IterationResult.java | 62 +++ .../rollup/action/GetRollupJobsAction.java | 25 +- .../rollup/job/RollupIndexerJobStats.java | 70 ++++ .../xpack/core/rollup/job/RollupJobStats.java | 156 ------- .../core/rollup/job/RollupJobStatus.java | 1 + .../indexing/AsyncTwoPhaseIndexerTests.java | 143 +++++++ .../IndexerStateEnumTests.java | 2 +- .../job/JobWrapperSerializingTests.java | 4 +- .../job/RollupIndexerJobStatsTests.java | 34 ++ .../core/rollup/job/RollupJobStatsTests.java | 35 -- .../core/rollup/job/RollupJobStatusTests.java | 1 + .../xpack/rollup/job/IndexerUtils.java | 4 +- .../xpack/rollup/job/RollupIndexer.java | 358 ++-------------- .../xpack/rollup/job/RollupJobTask.java | 6 +- .../rollup/rest/RestGetRollupJobsAction.java | 8 +- .../xpack/rollup/job/IndexerUtilsTests.java | 21 +- .../job/RollupIndexerIndexingTests.java | 2 +- .../rollup/job/RollupIndexerStateTests.java | 12 +- .../xpack/rollup/job/RollupJobTaskTests.java | 2 +- .../rest-api-spec/test/rollup/get_jobs.yml | 1 + 23 files changed, 900 insertions(+), 548 deletions(-) create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java rename x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/{rollup/job => indexing}/IndexerState.java (97%) create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IterationResult.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupIndexerJobStats.java delete mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStats.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java rename x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/{rollup/job => indexing}/IndexerStateEnumTests.java (98%) create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupIndexerJobStatsTests.java delete mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatsTests.java diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java new file mode 100644 index 0000000000000..ee0c0de97e0ae --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java @@ -0,0 +1,385 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.indexing; + +import org.apache.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; + +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicReference; + +/** + * An abstract class that builds an index incrementally. A background job can be launched using {@link #maybeTriggerAsyncJob(long)}, + * it will create the index from the source index up to the last complete bucket that is allowed to be built (based on job position). + * Only one background job can run simultaneously and {@link #onFinish()} is called when the job + * finishes. {@link #onFailure(Exception)} is called if the job fails with an exception and {@link #onAbort()} is called if the indexer is + * aborted while a job is running. The indexer must be started ({@link #start()} to allow a background job to run when + * {@link #maybeTriggerAsyncJob(long)} is called. {@link #stop()} can be used to stop the background job without aborting the indexer. + * + * In a nutshell this is a 2 cycle engine: 1st it sends a query, 2nd it indexes documents based on the response, sends the next query, + * indexes, queries, indexes, ... until a condition lets the engine pause until the source provides new input. + * + * @param Type that defines a job position to be defined by the implementation. + */ +public abstract class AsyncTwoPhaseIndexer { + private static final Logger logger = Logger.getLogger(AsyncTwoPhaseIndexer.class.getName()); + + private final JobStats stats; + + private final AtomicReference state; + private final AtomicReference position; + private final Executor executor; + + protected AsyncTwoPhaseIndexer(Executor executor, AtomicReference initialState, + JobPosition initialPosition, JobStats jobStats) { + this.executor = executor; + this.state = initialState; + this.position = new AtomicReference<>(initialPosition); + this.stats = jobStats; + } + + /** + * Get the current state of the indexer. + */ + public IndexerState getState() { + return state.get(); + } + + /** + * Get the current position of the indexer. + */ + public JobPosition getPosition() { + return position.get(); + } + + /** + * Get the stats of this indexer. + */ + public JobStats getStats() { + return stats; + } + + /** + * Sets the internal state to {@link IndexerState#STARTED} if the previous state + * was {@link IndexerState#STOPPED}. Setting the state to STARTED allows a job + * to run in the background when {@link #maybeTriggerAsyncJob(long)} is called. + * + * @return The new state for the indexer (STARTED, INDEXING or ABORTING if the + * job was already aborted). + */ + public synchronized IndexerState start() { + state.compareAndSet(IndexerState.STOPPED, IndexerState.STARTED); + return state.get(); + } + + /** + * Sets the internal state to {@link IndexerState#STOPPING} if an async job is + * running in the background and in such case {@link #onFinish()} will be called + * as soon as the background job detects that the indexer is stopped. If there + * is no job running when this function is called, the state is directly set to + * {@link IndexerState#STOPPED} and {@link #onFinish()} will never be called. 
+ * + * @return The new state for the indexer (STOPPED, STOPPING or ABORTING if the + * job was already aborted). + */ + public synchronized IndexerState stop() { + IndexerState currentState = state.updateAndGet(previousState -> { + if (previousState == IndexerState.INDEXING) { + return IndexerState.STOPPING; + } else if (previousState == IndexerState.STARTED) { + return IndexerState.STOPPED; + } else { + return previousState; + } + }); + return currentState; + } + + /** + * Sets the internal state to {@link IndexerState#ABORTING}. It returns false if + * an async job is running in the background and in such case {@link #onAbort} + * will be called as soon as the background job detects that the indexer is + * aborted. If there is no job running when this function is called, it returns + * true and {@link #onAbort()} will never be called. + * + * @return true if the indexer is aborted, false if a background job is running + * and abort is delayed. + */ + public synchronized boolean abort() { + IndexerState prevState = state.getAndUpdate((prev) -> IndexerState.ABORTING); + return prevState == IndexerState.STOPPED || prevState == IndexerState.STARTED; + } + + /** + * Triggers a background job that builds the index asynchronously iff + * there is no other job that runs and the indexer is started + * ({@link IndexerState#STARTED}. + * + * @param now + * The current time in milliseconds (used to limit the job to + * complete buckets) + * @return true if a job has been triggered, false otherwise + */ + public synchronized boolean maybeTriggerAsyncJob(long now) { + final IndexerState currentState = state.get(); + switch (currentState) { + case INDEXING: + case STOPPING: + case ABORTING: + logger.warn("Schedule was triggered for job [" + getJobId() + "], but prior indexer is still running."); + return false; + + case STOPPED: + logger.debug("Schedule was triggered for job [" + getJobId() + "] but job is stopped. Ignoring trigger."); + return false; + + case STARTED: + logger.debug("Schedule was triggered for job [" + getJobId() + "], state: [" + currentState + "]"); + stats.incrementNumInvocations(1); + onStartJob(now); + + if (state.compareAndSet(IndexerState.STARTED, IndexerState.INDEXING)) { + // fire off the search. Note this is async, the method will return from here + executor.execute(() -> doNextSearch(buildSearchRequest(), + ActionListener.wrap(this::onSearchResponse, exc -> finishWithFailure(exc)))); + logger.debug("Beginning to index [" + getJobId() + "], state: [" + currentState + "]"); + return true; + } else { + logger.debug("Could not move from STARTED to INDEXING state because current state is [" + state.get() + "]"); + return false; + } + + default: + logger.warn("Encountered unexpected state [" + currentState + "] while indexing"); + throw new IllegalStateException("Job encountered an illegal state [" + currentState + "]"); + } + } + + /** + * Called to get the Id of the job, used for logging. + * + * @return a string with the id of the job + */ + protected abstract String getJobId(); + + /** + * Called to process a response from the 1 search request in order to turn it into a {@link IterationResult}. + * + * @param searchResponse response from the search phase. + * @return Iteration object to be passed to indexing phase. + */ + protected abstract IterationResult doProcess(SearchResponse searchResponse); + + /** + * Called to build the next search request. + * + * @return SearchRequest to be passed to the search phase. 
+ */ + protected abstract SearchRequest buildSearchRequest(); + + /** + * Called at startup after job has been triggered using {@link #maybeTriggerAsyncJob(long)} and the + * internal state is {@link IndexerState#STARTED}. + * + * @param now The current time in milliseconds passed through from {@link #maybeTriggerAsyncJob(long)} + */ + protected abstract void onStartJob(long now); + + /** + * Executes the {@link SearchRequest} and calls nextPhase with the + * response or the exception if an error occurs. + * + * @param request + * The search request to execute + * @param nextPhase + * Listener for the next phase + */ + protected abstract void doNextSearch(SearchRequest request, ActionListener nextPhase); + + /** + * Executes the {@link BulkRequest} and calls nextPhase with the + * response or the exception if an error occurs. + * + * @param request + * The bulk request to execute + * @param nextPhase + * Listener for the next phase + */ + protected abstract void doNextBulk(BulkRequest request, ActionListener nextPhase); + + /** + * Called periodically during the execution of a background job. Implementation + * should persists the state somewhere and continue the execution asynchronously + * using next. + * + * @param state + * The current state of the indexer + * @param position + * The current position of the indexer + * @param next + * Runnable for the next phase + */ + protected abstract void doSaveState(IndexerState state, JobPosition position, Runnable next); + + /** + * Called when a failure occurs in an async job causing the execution to stop. + * + * @param exc + * The exception + */ + protected abstract void onFailure(Exception exc); + + /** + * Called when a background job finishes. + */ + protected abstract void onFinish(); + + /** + * Called when a background job detects that the indexer is aborted causing the + * async execution to stop. + */ + protected abstract void onAbort(); + + private void finishWithFailure(Exception exc) { + doSaveState(finishAndSetState(), position.get(), () -> onFailure(exc)); + } + + private IndexerState finishAndSetState() { + return state.updateAndGet(prev -> { + switch (prev) { + case INDEXING: + // ready for another job + return IndexerState.STARTED; + + case STOPPING: + // must be started again + return IndexerState.STOPPED; + + case ABORTING: + // abort and exit + onAbort(); + return IndexerState.ABORTING; // This shouldn't matter, since onAbort() will kill the task first + + case STOPPED: + // No-op. Shouldn't really be possible to get here (should have to go through + // STOPPING + // first which will be handled) but is harmless to no-op and we don't want to + // throw exception here + return IndexerState.STOPPED; + + default: + // any other state is unanticipated at this point + throw new IllegalStateException("Indexer job encountered an illegal state [" + prev + "]"); + } + }); + } + + private void onSearchResponse(SearchResponse searchResponse) { + try { + if (checkState(getState()) == false) { + return; + } + if (searchResponse.getShardFailures().length != 0) { + throw new RuntimeException("Shard failures encountered while running indexer for job [" + getJobId() + "]: " + + Arrays.toString(searchResponse.getShardFailures())); + } + + stats.incrementNumPages(1); + IterationResult iterationResult = doProcess(searchResponse); + + if (iterationResult.isDone()) { + logger.debug("Finished indexing for job [" + getJobId() + "], saving state and shutting down."); + + // Change state first, then try to persist. 
This prevents in-progress + // STOPPING/ABORTING from + // being persisted as STARTED but then stop the job + doSaveState(finishAndSetState(), position.get(), this::onFinish); + return; + } + + final List docs = iterationResult.getToIndex(); + final BulkRequest bulkRequest = new BulkRequest(); + docs.forEach(bulkRequest::add); + + // TODO this might be a valid case, e.g. if implementation filters + assert bulkRequest.requests().size() > 0; + + doNextBulk(bulkRequest, ActionListener.wrap(bulkResponse -> { + // TODO we should check items in the response and move after accordingly to + // resume the failing buckets ? + if (bulkResponse.hasFailures()) { + logger.warn("Error while attempting to bulk index documents: " + bulkResponse.buildFailureMessage()); + } + stats.incrementNumOutputDocuments(bulkResponse.getItems().length); + if (checkState(getState()) == false) { + return; + } + + JobPosition newPosition = iterationResult.getPosition(); + position.set(newPosition); + + onBulkResponse(bulkResponse, newPosition); + }, exc -> finishWithFailure(exc))); + } catch (Exception e) { + finishWithFailure(e); + } + } + + private void onBulkResponse(BulkResponse response, JobPosition position) { + try { + + ActionListener listener = ActionListener.wrap(this::onSearchResponse, this::finishWithFailure); + // TODO probably something more intelligent than every-50 is needed + if (stats.getNumPages() > 0 && stats.getNumPages() % 50 == 0) { + doSaveState(IndexerState.INDEXING, position, () -> doNextSearch(buildSearchRequest(), listener)); + } else { + doNextSearch(buildSearchRequest(), listener); + } + } catch (Exception e) { + finishWithFailure(e); + } + } + + /** + * Checks the {@link IndexerState} and returns false if the execution should be + * stopped. + */ + private boolean checkState(IndexerState currentState) { + switch (currentState) { + case INDEXING: + // normal state; + return true; + + case STOPPING: + logger.info("Indexer job encountered [" + IndexerState.STOPPING + "] state, halting indexer."); + doSaveState(finishAndSetState(), getPosition(), () -> { + }); + return false; + + case STOPPED: + return false; + + case ABORTING: + logger.info("Requested shutdown of indexer for job [" + getJobId() + "]"); + onAbort(); + return false; + + default: + // Anything other than indexing, aborting or stopping is unanticipated + logger.warn("Encountered unexpected state [" + currentState + "] while indexing"); + throw new IllegalStateException("Indexer job encountered an illegal state [" + currentState + "]"); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java new file mode 100644 index 0000000000000..2453504a5ba77 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java @@ -0,0 +1,114 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.indexing; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContentObject; + +import java.io.IOException; +import java.util.Objects; + +/** + * This class holds the runtime statistics of a job. The stats are not used by any internal process + * and are only for external monitoring/reference. Statistics are not persisted with the job, so if the + * allocated task is shutdown/restarted on a different node all the stats will reset. + */ +public abstract class IndexerJobStats implements ToXContentObject, Writeable { + + public static final ParseField NAME = new ParseField("job_stats"); + + protected long numPages = 0; + protected long numInputDocuments = 0; + protected long numOuputDocuments = 0; + protected long numInvocations = 0; + + public IndexerJobStats() { + } + + public IndexerJobStats(long numPages, long numInputDocuments, long numOuputDocuments, long numInvocations) { + this.numPages = numPages; + this.numInputDocuments = numInputDocuments; + this.numOuputDocuments = numOuputDocuments; + this.numInvocations = numInvocations; + } + + public IndexerJobStats(StreamInput in) throws IOException { + this.numPages = in.readVLong(); + this.numInputDocuments = in.readVLong(); + this.numOuputDocuments = in.readVLong(); + this.numInvocations = in.readVLong(); + } + + public long getNumPages() { + return numPages; + } + + public long getNumDocuments() { + return numInputDocuments; + } + + public long getNumInvocations() { + return numInvocations; + } + + public long getOutputDocuments() { + return numOuputDocuments; + } + + public void incrementNumPages(long n) { + assert(n >= 0); + numPages += n; + } + + public void incrementNumDocuments(long n) { + assert(n >= 0); + numInputDocuments += n; + } + + public void incrementNumInvocations(long n) { + assert(n >= 0); + numInvocations += n; + } + + public void incrementNumOutputDocuments(long n) { + assert(n >= 0); + numOuputDocuments += n; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(numPages); + out.writeVLong(numInputDocuments); + out.writeVLong(numOuputDocuments); + out.writeVLong(numInvocations); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + IndexerJobStats that = (IndexerJobStats) other; + + return Objects.equals(this.numPages, that.numPages) + && Objects.equals(this.numInputDocuments, that.numInputDocuments) + && Objects.equals(this.numOuputDocuments, that.numOuputDocuments) + && Objects.equals(this.numInvocations, that.numInvocations); + } + + @Override + public int hashCode() { + return Objects.hash(numPages, numInputDocuments, numOuputDocuments, numInvocations); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/IndexerState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerState.java similarity index 97% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/IndexerState.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerState.java index 6e211c1df9e3e..1b6b9a943cba2 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/IndexerState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerState.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.rollup.job; +package org.elasticsearch.xpack.core.indexing; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IterationResult.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IterationResult.java new file mode 100644 index 0000000000000..1261daf185b48 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IterationResult.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.indexing; + +import org.elasticsearch.action.index.IndexRequest; + +import java.util.List; + +/** + * Result object to hold the result of 1 iteration of iterative indexing. + * Acts as an interface between the implementation and the generic indexer. + */ +public class IterationResult { + + private final boolean isDone; + private final JobPosition position; + private final List toIndex; + + /** + * Constructor for the result of 1 iteration. + * + * @param toIndex the list of requests to be indexed + * @param position the extracted, persistable position of the job required for the search phase + * @param isDone true if source is exhausted and job should go to sleep + * + * Note: toIndex.empty() != isDone due to possible filtering in the specific implementation + */ + public IterationResult(List toIndex, JobPosition position, boolean isDone) { + this.toIndex = toIndex; + this.position = position; + this.isDone = isDone; + } + + /** + * Returns true if this indexing iteration is done and job should go into sleep mode. + */ + public boolean isDone() { + return isDone; + } + + /** + * Return the position of the job, a generic to be passed to the next query construction. + * + * @return the position + */ + public JobPosition getPosition() { + return position; + } + + /** + * List of requests to be passed to bulk indexing. + * + * @return List of index requests. 
+ */ + public List getToIndex() { + return toIndex; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java index 4fa5d84c7d476..8b25b99000b52 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java @@ -26,8 +26,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.tasks.Task; import org.elasticsearch.xpack.core.rollup.RollupField; +import org.elasticsearch.xpack.core.rollup.job.RollupIndexerJobStats; import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; -import org.elasticsearch.xpack.core.rollup.job.RollupJobStats; import org.elasticsearch.xpack.core.rollup.job.RollupJobStatus; import java.io.IOException; @@ -180,7 +180,14 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(JOBS.getPreferredName(), jobs); + + // XContentBuilder does not support passing the params object for Iterables + builder.field(JOBS.getPreferredName()); + builder.startArray(); + for (JobWrapper job : jobs) { + job.toXContent(builder, params); + } + builder.endArray(); builder.endObject(); return builder; } @@ -210,20 +217,20 @@ public final String toString() { public static class JobWrapper implements Writeable, ToXContentObject { private final RollupJobConfig job; - private final RollupJobStats stats; + private final RollupIndexerJobStats stats; private final RollupJobStatus status; public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, a -> new JobWrapper((RollupJobConfig) a[0], - (RollupJobStats) a[1], (RollupJobStatus)a[2])); + (RollupIndexerJobStats) a[1], (RollupJobStatus)a[2])); static { PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> RollupJobConfig.fromXContent(p, null), CONFIG); - PARSER.declareObject(ConstructingObjectParser.constructorArg(), RollupJobStats.PARSER::apply, STATS); + PARSER.declareObject(ConstructingObjectParser.constructorArg(), RollupIndexerJobStats.PARSER::apply, STATS); PARSER.declareObject(ConstructingObjectParser.constructorArg(), RollupJobStatus.PARSER::apply, STATUS); } - public JobWrapper(RollupJobConfig job, RollupJobStats stats, RollupJobStatus status) { + public JobWrapper(RollupJobConfig job, RollupIndexerJobStats stats, RollupJobStatus status) { this.job = job; this.stats = stats; this.status = status; @@ -231,7 +238,7 @@ public JobWrapper(RollupJobConfig job, RollupJobStats stats, RollupJobStatus sta public JobWrapper(StreamInput in) throws IOException { this.job = new RollupJobConfig(in); - this.stats = new RollupJobStats(in); + this.stats = new RollupIndexerJobStats(in); this.status = new RollupJobStatus(in); } @@ -246,7 +253,7 @@ public RollupJobConfig getJob() { return job; } - public RollupJobStats getStats() { + public RollupIndexerJobStats getStats() { return stats; } @@ -260,7 +267,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(CONFIG.getPreferredName()); job.toXContent(builder, params); builder.field(STATUS.getPreferredName(), status); - builder.field(STATS.getPreferredName(), stats); + builder.field(STATS.getPreferredName(), stats, 
params); builder.endObject(); return builder; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupIndexerJobStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupIndexerJobStats.java new file mode 100644 index 0000000000000..87915671b79a2 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupIndexerJobStats.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.rollup.job; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.indexing.IndexerJobStats; + +import java.io.IOException; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * The Rollup specialization of stats for the AsyncTwoPhaseIndexer. + * Note: instead of `documents_indexed`, this XContent show `rollups_indexed` + */ +public class RollupIndexerJobStats extends IndexerJobStats { + private static ParseField NUM_PAGES = new ParseField("pages_processed"); + private static ParseField NUM_INPUT_DOCUMENTS = new ParseField("documents_processed"); + private static ParseField NUM_OUTPUT_DOCUMENTS = new ParseField("rollups_indexed"); + private static ParseField NUM_INVOCATIONS = new ParseField("trigger_count"); + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>(NAME.getPreferredName(), + args -> new RollupIndexerJobStats((long) args[0], (long) args[1], (long) args[2], (long) args[3])); + + static { + PARSER.declareLong(constructorArg(), NUM_PAGES); + PARSER.declareLong(constructorArg(), NUM_INPUT_DOCUMENTS); + PARSER.declareLong(constructorArg(), NUM_OUTPUT_DOCUMENTS); + PARSER.declareLong(constructorArg(), NUM_INVOCATIONS); + } + + public RollupIndexerJobStats() { + super(); + } + + public RollupIndexerJobStats(long numPages, long numInputDocuments, long numOuputDocuments, long numInvocations) { + super(numPages, numInputDocuments, numOuputDocuments, numInvocations); + } + + public RollupIndexerJobStats(StreamInput in) throws IOException { + super(in); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(NUM_PAGES.getPreferredName(), numPages); + builder.field(NUM_INPUT_DOCUMENTS.getPreferredName(), numInputDocuments); + builder.field(NUM_OUTPUT_DOCUMENTS.getPreferredName(), numOuputDocuments); + builder.field(NUM_INVOCATIONS.getPreferredName(), numInvocations); + builder.endObject(); + return builder; + } + + public static RollupIndexerJobStats fromXContent(XContentParser parser) { + try { + return PARSER.parse(parser, null); + } catch (IOException e) { + throw new RuntimeException(e); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStats.java deleted file mode 100644 index 06cfb520af552..0000000000000 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStats.java +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.core.rollup.job; - -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; - -import java.io.IOException; -import java.util.Objects; - -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; - -/** - * This class holds the runtime statistics of a job. The stats are not used by any internal process - * and are only for external monitoring/reference. Statistics are not persisted with the job, so if the - * allocated task is shutdown/restarted on a different node all the stats will reset. - */ -public class RollupJobStats implements ToXContentObject, Writeable { - - public static final ParseField NAME = new ParseField("job_stats"); - - private static ParseField NUM_PAGES = new ParseField("pages_processed"); - private static ParseField NUM_DOCUMENTS = new ParseField("documents_processed"); - private static ParseField NUM_ROLLUPS = new ParseField("rollups_indexed"); - private static ParseField NUM_INVOCATIONS = new ParseField("trigger_count"); - - private long numPages = 0; - private long numDocuments = 0; - private long numRollups = 0; - private long numInvocations = 0; - - public static final ConstructingObjectParser PARSER = - new ConstructingObjectParser<>(NAME.getPreferredName(), - args -> new RollupJobStats((long) args[0], (long) args[1], (long) args[2], (long) args[3])); - - static { - PARSER.declareLong(constructorArg(), NUM_PAGES); - PARSER.declareLong(constructorArg(), NUM_DOCUMENTS); - PARSER.declareLong(constructorArg(), NUM_ROLLUPS); - PARSER.declareLong(constructorArg(), NUM_INVOCATIONS); - } - - public RollupJobStats() { - } - - public RollupJobStats(long numPages, long numDocuments, long numRollups, long numInvocations) { - this.numPages = numPages; - this.numDocuments = numDocuments; - this.numRollups = numRollups; - this.numInvocations = numInvocations; - } - - public RollupJobStats(StreamInput in) throws IOException { - this.numPages = in.readVLong(); - this.numDocuments = in.readVLong(); - this.numRollups = in.readVLong(); - this.numInvocations = in.readVLong(); - } - - public long getNumPages() { - return numPages; - } - - public long getNumDocuments() { - return numDocuments; - } - - public long getNumInvocations() { - return numInvocations; - } - - public long getNumRollups() { - return numRollups; - } - - public void incrementNumPages(long n) { - assert(n >= 0); - numPages += n; - } - - public void incrementNumDocuments(long n) { - assert(n >= 0); - numDocuments += n; - } - - public void incrementNumInvocations(long n) { - assert(n >= 0); - numInvocations += n; - } - - public void incrementNumRollups(long n) { - assert(n >= 0); - numRollups += n; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - 
out.writeVLong(numPages); - out.writeVLong(numDocuments); - out.writeVLong(numRollups); - out.writeVLong(numInvocations); - } - - public static RollupJobStats fromXContent(XContentParser parser) { - try { - return PARSER.parse(parser, null); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field(NUM_PAGES.getPreferredName(), numPages); - builder.field(NUM_DOCUMENTS.getPreferredName(), numDocuments); - builder.field(NUM_ROLLUPS.getPreferredName(), numRollups); - builder.field(NUM_INVOCATIONS.getPreferredName(), numInvocations); - builder.endObject(); - return builder; - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } - - if (other == null || getClass() != other.getClass()) { - return false; - } - - RollupJobStats that = (RollupJobStats) other; - - return Objects.equals(this.numPages, that.numPages) - && Objects.equals(this.numDocuments, that.numDocuments) - && Objects.equals(this.numRollups, that.numRollups) - && Objects.equals(this.numInvocations, that.numInvocations); - } - - @Override - public int hashCode() { - return Objects.hash(numPages, numDocuments, numRollups, numInvocations); - } - -} - diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java index 640385c9c80d5..0a2f046907c80 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.tasks.Task; +import org.elasticsearch.xpack.core.indexing.IndexerState; import java.io.IOException; import java.util.HashMap; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java new file mode 100644 index 0000000000000..2662e05570c6d --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java @@ -0,0 +1,143 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.indexing; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchResponseSections; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.Collections; +import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.equalTo; + +public class AsyncTwoPhaseIndexerTests extends ESTestCase { + + AtomicBoolean isFinished = new AtomicBoolean(false); + + private class MockIndexer extends AsyncTwoPhaseIndexer { + + // test the execution order + private int step; + + protected MockIndexer(Executor executor, AtomicReference initialState, Integer initialPosition) { + super(executor, initialState, initialPosition, new MockJobStats()); + } + + @Override + protected String getJobId() { + return "mock"; + } + + @Override + protected IterationResult doProcess(SearchResponse searchResponse) { + assertThat(step, equalTo(3)); + ++step; + return new IterationResult(Collections.emptyList(), 3, true); + } + + @Override + protected SearchRequest buildSearchRequest() { + assertThat(step, equalTo(1)); + ++step; + return null; + } + + @Override + protected void onStartJob(long now) { + assertThat(step, equalTo(0)); + ++step; + } + + @Override + protected void doNextSearch(SearchRequest request, ActionListener nextPhase) { + assertThat(step, equalTo(2)); + ++step; + final SearchResponseSections sections = new SearchResponseSections(new SearchHits(new SearchHit[0], 0, 0), null, null, false, + null, null, 1); + + nextPhase.onResponse(new SearchResponse(sections, null, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, null)); + } + + @Override + protected void doNextBulk(BulkRequest request, ActionListener nextPhase) { + fail("should not be called"); + } + + @Override + protected void doSaveState(IndexerState state, Integer position, Runnable next) { + assertThat(step, equalTo(4)); + ++step; + next.run(); + } + + @Override + protected void onFailure(Exception exc) { + fail(exc.getMessage()); + } + + @Override + protected void onFinish() { + assertThat(step, equalTo(5)); + ++step; + isFinished.set(true); + } + + @Override + protected void onAbort() { + } + + public int getStep() { + return step; + } + + } + + private static class MockJobStats extends IndexerJobStats { + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return null; + } + } + + public void testStateMachine() throws InterruptedException { + AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); + final ExecutorService executor = Executors.newFixedThreadPool(1); + + try { + + MockIndexer indexer = new MockIndexer(executor, state, 2); + indexer.start(); + assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); + assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); + assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); + 
assertThat(indexer.getPosition(), equalTo(2)); + ESTestCase.awaitBusy(() -> isFinished.get()); + assertThat(indexer.getStep(), equalTo(6)); + assertThat(indexer.getStats().getNumInvocations(), equalTo(1L)); + assertThat(indexer.getStats().getNumPages(), equalTo(1L)); + assertThat(indexer.getStats().getOutputDocuments(), equalTo(0L)); + assertTrue(indexer.abort()); + } finally { + executor.shutdownNow(); + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/IndexerStateEnumTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/IndexerStateEnumTests.java similarity index 98% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/IndexerStateEnumTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/IndexerStateEnumTests.java index ec17a37e23b2b..329800c2f1a24 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/IndexerStateEnumTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/IndexerStateEnumTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.rollup.job; +package org.elasticsearch.xpack.core.indexing; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/JobWrapperSerializingTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/JobWrapperSerializingTests.java index a0df63bc38dde..1ab6e6a55d495 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/JobWrapperSerializingTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/JobWrapperSerializingTests.java @@ -8,6 +8,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.action.GetRollupJobsAction; @@ -40,7 +41,8 @@ protected GetRollupJobsAction.JobWrapper createTestInstance() { } return new GetRollupJobsAction.JobWrapper(ConfigTestHelpers.randomRollupJobConfig(random()), - new RollupJobStats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()), + new RollupIndexerJobStats(randomNonNegativeLong(), randomNonNegativeLong(), + randomNonNegativeLong(), randomNonNegativeLong()), new RollupJobStatus(state, Collections.emptyMap(), randomBoolean())); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupIndexerJobStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupIndexerJobStatsTests.java new file mode 100644 index 0000000000000..81f31e2e5c4eb --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupIndexerJobStatsTests.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.rollup.job; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; + +public class RollupIndexerJobStatsTests extends AbstractSerializingTestCase { + + @Override + protected RollupIndexerJobStats createTestInstance() { + return randomStats(); + } + + @Override + protected Writeable.Reader instanceReader() { + return RollupIndexerJobStats::new; + } + + @Override + protected RollupIndexerJobStats doParseInstance(XContentParser parser) { + return RollupIndexerJobStats.fromXContent(parser); + } + + public static RollupIndexerJobStats randomStats() { + return new RollupIndexerJobStats(randomNonNegativeLong(), randomNonNegativeLong(), + randomNonNegativeLong(), randomNonNegativeLong()); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatsTests.java deleted file mode 100644 index 0091b21dc40d0..0000000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatsTests.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.core.rollup.job; - -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractSerializingTestCase; -import org.elasticsearch.xpack.core.rollup.job.RollupJobStats; - -public class RollupJobStatsTests extends AbstractSerializingTestCase { - - @Override - protected RollupJobStats createTestInstance() { - return randomStats(); - } - - @Override - protected Writeable.Reader instanceReader() { - return RollupJobStats::new; - } - - @Override - protected RollupJobStats doParseInstance(XContentParser parser) { - return RollupJobStats.fromXContent(parser); - } - - public static RollupJobStats randomStats() { - return new RollupJobStats(randomNonNegativeLong(), randomNonNegativeLong(), - randomNonNegativeLong(), randomNonNegativeLong()); - } -} - diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatusTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatusTests.java index 2c802a7e41dc3..f46bda788bf5b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatusTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatusTests.java @@ -8,6 +8,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.indexing.IndexerState; import java.util.HashMap; import java.util.Map; diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/IndexerUtils.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/IndexerUtils.java index 9119a5445d42e..94d64b17de8f3 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/IndexerUtils.java +++ 
b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/IndexerUtils.java @@ -17,7 +17,7 @@ import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; import org.elasticsearch.xpack.core.rollup.job.GroupConfig; -import org.elasticsearch.xpack.core.rollup.job.RollupJobStats; +import org.elasticsearch.xpack.core.rollup.job.RollupIndexerJobStats; import org.elasticsearch.xpack.rollup.Rollup; import java.util.ArrayList; @@ -46,7 +46,7 @@ class IndexerUtils { * @param isUpgradedDocID `true` if this job is using the new ID scheme * @return A list of rolled documents derived from the response */ - static List processBuckets(CompositeAggregation agg, String rollupIndex, RollupJobStats stats, + static List processBuckets(CompositeAggregation agg, String rollupIndex, RollupIndexerJobStats stats, GroupConfig groupConfig, String jobId, boolean isUpgradedDocID) { logger.debug("Buckets: [" + agg.getBuckets().size() + "][" + jobId + "]"); diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java index 6abb7ffa56754..b1b052a3659d6 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java @@ -5,11 +5,6 @@ */ package org.elasticsearch.xpack.rollup.job; -import org.apache.log4j.Logger; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.unit.TimeValue; @@ -33,20 +28,22 @@ import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.xpack.core.indexing.IndexerState; +import org.elasticsearch.xpack.core.indexing.IterationResult; +import org.elasticsearch.xpack.core.indexing.AsyncTwoPhaseIndexer; import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; import org.elasticsearch.xpack.core.rollup.job.GroupConfig; import org.elasticsearch.xpack.core.rollup.job.HistogramGroupConfig; -import org.elasticsearch.xpack.core.rollup.job.IndexerState; import org.elasticsearch.xpack.core.rollup.job.MetricConfig; +import org.elasticsearch.xpack.core.rollup.job.RollupIndexerJobStats; import org.elasticsearch.xpack.core.rollup.job.RollupJob; import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; -import org.elasticsearch.xpack.core.rollup.job.RollupJobStats; import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig; import org.joda.time.DateTimeZone; import java.util.ArrayList; -import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -54,30 +51,16 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; -import static java.util.Collections.singletonList; -import static java.util.Collections.unmodifiableList; import static org.elasticsearch.xpack.core.rollup.RollupField.formatFieldName; /** - * An abstract 
class that builds a rollup index incrementally. A background job can be launched using {@link #maybeTriggerAsyncJob(long)}, - * it will create the rollup index from the source index up to the last complete bucket that is allowed to be built (based on the current - * time and the delay set on the rollup job). Only one background job can run simultaneously and {@link #onFinish()} is called when the job - * finishes. {@link #onFailure(Exception)} is called if the job fails with an exception and {@link #onAbort()} is called if the indexer is - * aborted while a job is running. The indexer must be started ({@link #start()} to allow a background job to run when - * {@link #maybeTriggerAsyncJob(long)} is called. {@link #stop()} can be used to stop the background job without aborting the indexer. + * An abstract implementation of {@link AsyncTwoPhaseIndexer} that builds a rollup index incrementally. */ -public abstract class RollupIndexer { - private static final Logger logger = Logger.getLogger(RollupIndexer.class.getName()); - +public abstract class RollupIndexer extends AsyncTwoPhaseIndexer, RollupIndexerJobStats> { static final String AGGREGATION_NAME = RollupField.NAME; private final RollupJob job; - private final RollupJobStats stats; - private final AtomicReference state; - private final AtomicReference> position; - private final Executor executor; protected final AtomicBoolean upgradedDocumentID; - private final CompositeAggregationBuilder compositeBuilder; private long maxBoundary; @@ -87,84 +70,16 @@ public abstract class RollupIndexer { * @param job The rollup job * @param initialState Initial state for the indexer * @param initialPosition The last indexed bucket of the task + * @param upgradedDocumentID whether job has updated IDs (for BWC) */ - RollupIndexer(Executor executor, RollupJob job, AtomicReference initialState, - Map initialPosition, AtomicBoolean upgradedDocumentID) { - this.executor = executor; + RollupIndexer(Executor executor, RollupJob job, AtomicReference initialState, Map initialPosition, + AtomicBoolean upgradedDocumentID) { + super(executor, initialState, initialPosition, new RollupIndexerJobStats()); this.job = job; - this.stats = new RollupJobStats(); - this.state = initialState; - this.position = new AtomicReference<>(initialPosition); this.compositeBuilder = createCompositeBuilder(job.getConfig()); this.upgradedDocumentID = upgradedDocumentID; } - /** - * Executes the {@link SearchRequest} and calls nextPhase with the response - * or the exception if an error occurs. - * - * @param request The search request to execute - * @param nextPhase Listener for the next phase - */ - protected abstract void doNextSearch(SearchRequest request, ActionListener nextPhase); - - /** - * Executes the {@link BulkRequest} and calls nextPhase with the response - * or the exception if an error occurs. - * - * @param request The bulk request to execute - * @param nextPhase Listener for the next phase - */ - protected abstract void doNextBulk(BulkRequest request, ActionListener nextPhase); - - /** - * Called periodically during the execution of a background job. Implementation should - * persists the state somewhere and continue the execution asynchronously using next. 
- * - * @param state The current state of the indexer - * @param position The current position of the indexer - * @param next Runnable for the next phase - */ - protected abstract void doSaveState(IndexerState state, Map position, Runnable next); - - /** - * Called when a failure occurs in an async job causing the execution to stop. - * @param exc The exception - */ - protected abstract void onFailure(Exception exc); - - /** - * Called when a background job finishes. - */ - protected abstract void onFinish(); - - /** - * Called when a background job detects that the indexer is aborted causing the async execution - * to stop. - */ - protected abstract void onAbort(); - - /** - * Get the current state of the indexer. - */ - public IndexerState getState() { - return state.get(); - } - - /** - * Get the current position of the indexer. - */ - public Map getPosition() { - return position.get(); - } - - /** - * Get the stats of this indexer. - */ - public RollupJobStats getStats() { - return stats; - } - /** * Returns if this job has upgraded it's ID scheme yet or not */ @@ -172,229 +87,28 @@ public boolean isUpgradedDocumentID() { return upgradedDocumentID.get(); } - /** - * Sets the internal state to {@link IndexerState#STARTED} if the previous state was {@link IndexerState#STOPPED}. Setting the state to - * STARTED allows a job to run in the background when {@link #maybeTriggerAsyncJob(long)} is called. - * @return The new state for the indexer (STARTED, INDEXING or ABORTING if the job was already aborted). - */ - public synchronized IndexerState start() { - state.compareAndSet(IndexerState.STOPPED, IndexerState.STARTED); - return state.get(); + @Override + protected String getJobId() { + return job.getConfig().getId(); } - /** - * Sets the internal state to {@link IndexerState#STOPPING} if an async job is running in the background and in such case - * {@link #onFinish()} will be called as soon as the background job detects that the indexer is stopped. If there is no job running when - * this function is called, the state is directly set to {@link IndexerState#STOPPED} and {@link #onFinish()} will never be called. - * @return The new state for the indexer (STOPPED, STOPPING or ABORTING if the job was already aborted). - */ - public synchronized IndexerState stop() { - IndexerState currentState = state.updateAndGet(previousState -> { - if (previousState == IndexerState.INDEXING) { - return IndexerState.STOPPING; - } else if (previousState == IndexerState.STARTED) { - return IndexerState.STOPPED; - } else { - return previousState; - } - }); - return currentState; - } - - /** - * Sets the internal state to {@link IndexerState#ABORTING}. It returns false if an async job is running in the background and in such - * case {@link #onAbort} will be called as soon as the background job detects that the indexer is aborted. If there is no job running - * when this function is called, it returns true and {@link #onAbort()} will never be called. - * @return true if the indexer is aborted, false if a background job is running and abort is delayed. - */ - public synchronized boolean abort() { - IndexerState prevState = state.getAndUpdate((prev) -> IndexerState.ABORTING); - return prevState == IndexerState.STOPPED || prevState == IndexerState.STARTED; - } - - /** - * Triggers a background job that builds the rollup index asynchronously iff there is no other job that runs - * and the indexer is started ({@link IndexerState#STARTED}. 
- * - * @param now The current time in milliseconds (used to limit the job to complete buckets) - * @return true if a job has been triggered, false otherwise - */ - public synchronized boolean maybeTriggerAsyncJob(long now) { - final IndexerState currentState = state.get(); - switch (currentState) { - case INDEXING: - case STOPPING: - case ABORTING: - logger.warn("Schedule was triggered for rollup job [" + job.getConfig().getId() + "], but prior indexer is still running."); - return false; - - case STOPPED: - logger.debug("Schedule was triggered for rollup job [" + job.getConfig().getId() - + "] but job is stopped. Ignoring trigger."); - return false; - - case STARTED: - logger.debug("Schedule was triggered for rollup job [" + job.getConfig().getId() + "], state: [" + currentState + "]"); - // Only valid time to start indexing is when we are STARTED but not currently INDEXING. - stats.incrementNumInvocations(1); - - // rounds the current time to its current bucket based on the date histogram interval. - // this is needed to exclude buckets that can still receive new documents. - DateHistogramGroupConfig dateHisto = job.getConfig().getGroupConfig().getDateHistogram(); - long rounded = dateHisto.createRounding().round(now); - if (dateHisto.getDelay() != null) { - // if the job has a delay we filter all documents that appear before it. - maxBoundary = rounded - TimeValue.parseTimeValue(dateHisto.getDelay().toString(), "").millis(); - } else { - maxBoundary = rounded; - } - - if (state.compareAndSet(IndexerState.STARTED, IndexerState.INDEXING)) { - // fire off the search. Note this is async, the method will return from here - executor.execute(() -> doNextSearch(buildSearchRequest(), - ActionListener.wrap(this::onSearchResponse, exc -> finishWithFailure(exc)))); - logger.debug("Beginning to rollup [" + job.getConfig().getId() + "], state: [" + currentState + "]"); - return true; - } else { - logger.debug("Could not move from STARTED to INDEXING state because current state is [" + state.get() + "]"); - return false; - } - - default: - logger.warn("Encountered unexpected state [" + currentState + "] while indexing"); - throw new IllegalStateException("Rollup job encountered an illegal state [" + currentState + "]"); - } - } - - /** - * Checks the {@link IndexerState} and returns false if the execution - * should be stopped. - */ - private boolean checkState(IndexerState currentState) { - switch (currentState) { - case INDEXING: - // normal state; - return true; - - case STOPPING: - logger.info("Rollup job encountered [" + IndexerState.STOPPING + "] state, halting indexer."); - doSaveState(finishAndSetState(), getPosition(), () -> {}); - return false; - - case STOPPED: - return false; - - case ABORTING: - logger.info("Requested shutdown of indexer for job [" + job.getConfig().getId() + "]"); - onAbort(); - return false; - - default: - // Anything other than indexing, aborting or stopping is unanticipated - logger.warn("Encountered unexpected state [" + currentState + "] while indexing"); - throw new IllegalStateException("Rollup job encountered an illegal state [" + currentState + "]"); - } - } - - private void onBulkResponse(BulkResponse response, Map after) { - // TODO we should check items in the response and move after accordingly to resume the failing buckets ? 
- stats.incrementNumRollups(response.getItems().length); - if (response.hasFailures()) { - logger.warn("Error while attempting to bulk index rollup documents: " + response.buildFailureMessage()); - } - try { - if (checkState(getState()) == false) { - return ; - } - position.set(after); - ActionListener listener = ActionListener.wrap(this::onSearchResponse, this::finishWithFailure); - // TODO probably something more intelligent than every-50 is needed - if (stats.getNumPages() > 0 && stats.getNumPages() % 50 == 0) { - doSaveState(IndexerState.INDEXING, after, () -> doNextSearch(buildSearchRequest(), listener)); - } else { - doNextSearch(buildSearchRequest(), listener); - } - } catch (Exception e) { - finishWithFailure(e); + @Override + protected void onStartJob(long now) { + // this is needed to exclude buckets that can still receive new documents. + DateHistogramGroupConfig dateHisto = job.getConfig().getGroupConfig().getDateHistogram(); + long rounded = dateHisto.createRounding().round(now); + if (dateHisto.getDelay() != null) { + // if the job has a delay we filter all documents that appear before it. + maxBoundary = rounded - TimeValue.parseTimeValue(dateHisto.getDelay().toString(), "").millis(); + } else { + maxBoundary = rounded; } } - private void onSearchResponse(SearchResponse searchResponse) { - try { - if (checkState(getState()) == false) { - return ; - } - if (searchResponse.getShardFailures().length != 0) { - throw new RuntimeException("Shard failures encountered while running indexer for rollup job [" - + job.getConfig().getId() + "]: " + Arrays.toString(searchResponse.getShardFailures())); - } - final CompositeAggregation response = searchResponse.getAggregations().get(AGGREGATION_NAME); - if (response == null) { - throw new IllegalStateException("Missing composite response for query: " + compositeBuilder.toString()); - } - stats.incrementNumPages(1); - if (response.getBuckets().isEmpty()) { - // this is the end... - logger.debug("Finished indexing for job [" + job.getConfig().getId() + "], saving state and shutting down."); - - // Change state first, then try to persist. 
This prevents in-progress STOPPING/ABORTING from - // being persisted as STARTED but then stop the job - doSaveState(finishAndSetState(), position.get(), this::onFinish); - return; - } - - final BulkRequest bulkRequest = new BulkRequest(); + @Override + protected SearchRequest buildSearchRequest() { // Indexer is single-threaded, and only place that the ID scheme can get upgraded is doSaveState(), so // we can pass down the boolean value rather than the atomic here - final List docs = IndexerUtils.processBuckets(response, job.getConfig().getRollupIndex(), - stats, job.getConfig().getGroupConfig(), job.getConfig().getId(), upgradedDocumentID.get()); - docs.forEach(bulkRequest::add); - assert bulkRequest.requests().size() > 0; - doNextBulk(bulkRequest, - ActionListener.wrap( - bulkResponse -> onBulkResponse(bulkResponse, response.afterKey()), - exc -> finishWithFailure(exc) - ) - ); - } catch(Exception e) { - finishWithFailure(e); - } - } - - private void finishWithFailure(Exception exc) { - doSaveState(finishAndSetState(), position.get(), () -> onFailure(exc)); - } - - private IndexerState finishAndSetState() { - return state.updateAndGet( - prev -> { - switch (prev) { - case INDEXING: - // ready for another job - return IndexerState.STARTED; - - case STOPPING: - // must be started again - return IndexerState.STOPPED; - - case ABORTING: - // abort and exit - onAbort(); - return IndexerState.ABORTING; // This shouldn't matter, since onAbort() will kill the task first - - case STOPPED: - // No-op. Shouldn't really be possible to get here (should have to go through STOPPING - // first which will be handled) but is harmless to no-op and we don't want to throw exception here - return IndexerState.STOPPED; - - default: - // any other state is unanticipated at this point - throw new IllegalStateException("Rollup job encountered an illegal state [" + prev + "]"); - } - }); - } - - private SearchRequest buildSearchRequest() { final Map position = getPosition(); SearchSourceBuilder searchSource = new SearchSourceBuilder() .size(0) @@ -405,6 +119,16 @@ private SearchRequest buildSearchRequest() { return new SearchRequest(job.getConfig().getIndexPattern()).source(searchSource); } + @Override + protected IterationResult> doProcess(SearchResponse searchResponse) { + final CompositeAggregation response = searchResponse.getAggregations().get(AGGREGATION_NAME); + + return new IterationResult<>( + IndexerUtils.processBuckets(response, job.getConfig().getRollupIndex(), getStats(), + job.getConfig().getGroupConfig(), job.getConfig().getId(), upgradedDocumentID.get()), + response.afterKey(), response.getBuckets().isEmpty()); + } + /** * Creates a skeleton {@link CompositeAggregationBuilder} from the provided job config. * @param config The config for the job. 
@@ -481,7 +205,7 @@ public static List> createValueSourceBuilders(fi final TermsGroupConfig terms = groupConfig.getTerms(); builders.addAll(createValueSourceBuilders(terms)); } - return unmodifiableList(builders); + return Collections.unmodifiableList(builders); } public static List> createValueSourceBuilders(final DateHistogramGroupConfig dateHistogram) { @@ -491,7 +215,7 @@ public static List> createValueSourceBuilders(fi dateHistogramBuilder.dateHistogramInterval(dateHistogram.getInterval()); dateHistogramBuilder.field(dateHistogramField); dateHistogramBuilder.timeZone(toDateTimeZone(dateHistogram.getTimeZone())); - return singletonList(dateHistogramBuilder); + return Collections.singletonList(dateHistogramBuilder); } public static List> createValueSourceBuilders(final HistogramGroupConfig histogram) { @@ -506,7 +230,7 @@ public static List> createValueSourceBuilders(fi builders.add(histogramBuilder); } } - return unmodifiableList(builders); + return Collections.unmodifiableList(builders); } public static List> createValueSourceBuilders(final TermsGroupConfig terms) { @@ -520,7 +244,7 @@ public static List> createValueSourceBuilders(fi builders.add(termsBuilder); } } - return unmodifiableList(builders); + return Collections.unmodifiableList(builders); } /** @@ -564,7 +288,7 @@ static List createAggregationBuilders(final List docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), groupConfig, "foo", false); + List docs = IndexerUtils.processBuckets(composite, "foo", new RollupIndexerJobStats(), groupConfig, "foo", false); assertThat(docs.size(), equalTo(1)); assertThat(docs.get(0).id(), equalTo("1237859798")); } @@ -406,7 +406,7 @@ public void testKeyOrderingNewID() { }); GroupConfig groupConfig = new GroupConfig(randomDateHistogramGroupConfig(random()), new HistogramGroupConfig(1L, "abc"), null); - List docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), groupConfig, "foo", true); + List docs = IndexerUtils.processBuckets(composite, "foo", new RollupIndexerJobStats(), groupConfig, "foo", true); assertThat(docs.size(), equalTo(1)); assertThat(docs.get(0).id(), equalTo("foo$c9LcrFqeFW92uN_Z7sv1hA")); } @@ -456,7 +456,7 @@ public void testKeyOrderingNewIDLong() { }); GroupConfig groupConfig = new GroupConfig(randomDateHistogramGroupConfig(random()), new HistogramGroupConfig(1, "abc"), null); - List docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), groupConfig, "foo", true); + List docs = IndexerUtils.processBuckets(composite, "foo", new RollupIndexerJobStats(), groupConfig, "foo", true); assertThat(docs.size(), equalTo(1)); assertThat(docs.get(0).id(), equalTo("foo$VAFKZpyaEqYRPLyic57_qw")); } @@ -483,14 +483,15 @@ public void testNullKeys() { }); GroupConfig groupConfig = new GroupConfig(randomDateHistogramGroupConfig(random()), randomHistogramGroupConfig(random()), null); - List docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), groupConfig, "foo", randomBoolean()); + List docs = IndexerUtils.processBuckets(composite, "foo", new RollupIndexerJobStats(), + groupConfig, "foo", randomBoolean()); assertThat(docs.size(), equalTo(1)); assertFalse(Strings.isNullOrEmpty(docs.get(0).id())); } public void testMissingBuckets() throws IOException { String indexName = randomAlphaOfLengthBetween(1, 10); - RollupJobStats stats= new RollupJobStats(0, 0, 0, 0); + RollupIndexerJobStats stats = new RollupIndexerJobStats(0, 0, 0, 0); String metricField = "metric_field"; String valueField = "value_field"; diff --git 
a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java index 6d29ee9f9ba6d..55f1cfbdbb29c 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java @@ -50,10 +50,10 @@ import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; import org.elasticsearch.xpack.core.rollup.job.GroupConfig; -import org.elasticsearch.xpack.core.rollup.job.IndexerState; import org.elasticsearch.xpack.core.rollup.job.MetricConfig; import org.elasticsearch.xpack.core.rollup.job.RollupJob; import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; +import org.elasticsearch.xpack.core.indexing.IndexerState; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.junit.Before; diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java index 955dcbc2beb48..c74ecbadf4fbe 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java @@ -23,8 +23,8 @@ import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.job.GroupConfig; -import org.elasticsearch.xpack.core.rollup.job.IndexerState; import org.elasticsearch.xpack.core.rollup.job.RollupJob; +import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; import org.mockito.stubbing.Answer; @@ -639,7 +639,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws assertThat(indexer.getStats().getNumPages(), equalTo(1L)); // Note: no docs were indexed - assertThat(indexer.getStats().getNumRollups(), equalTo(0L)); + assertThat(indexer.getStats().getOutputDocuments(), equalTo(0L)); assertTrue(indexer.abort()); } finally { executor.shutdownNow(); @@ -743,7 +743,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws assertThat(indexer.getStats().getNumPages(), equalTo(1L)); // Note: no docs were indexed - assertThat(indexer.getStats().getNumRollups(), equalTo(0L)); + assertThat(indexer.getStats().getOutputDocuments(), equalTo(0L)); assertTrue(indexer.abort()); } finally { executor.shutdownNow(); @@ -763,7 +763,7 @@ public void testSearchShardFailure() throws Exception { Function bulkFunction = bulkRequest -> new BulkResponse(new BulkItemResponse[0], 100); Consumer failureConsumer = e -> { - assertThat(e.getMessage(), startsWith("Shard failures encountered while running indexer for rollup job")); + assertThat(e.getMessage(), startsWith("Shard failures encountered while running indexer for job")); isFinished.set(true); }; @@ -786,7 +786,7 @@ public void testSearchShardFailure() throws Exception { // Note: no pages processed, no docs were indexed assertThat(indexer.getStats().getNumPages(), equalTo(0L)); - assertThat(indexer.getStats().getNumRollups(), equalTo(0L)); + assertThat(indexer.getStats().getOutputDocuments(), equalTo(0L)); 
assertTrue(indexer.abort()); } finally { executor.shutdownNow(); @@ -896,7 +896,7 @@ protected void doNextBulk(BulkRequest request, ActionListener next assertThat(indexer.getStats().getNumPages(), equalTo(1L)); // Note: no docs were indexed - assertThat(indexer.getStats().getNumRollups(), equalTo(0L)); + assertThat(indexer.getStats().getOutputDocuments(), equalTo(0L)); assertTrue(indexer.abort()); } finally { executor.shutdownNow(); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java index 9a75d6fc67590..a47d057b5d5b9 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java @@ -20,11 +20,11 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.action.StartRollupJobAction; import org.elasticsearch.xpack.core.rollup.action.StopRollupJobAction; -import org.elasticsearch.xpack.core.rollup.job.IndexerState; import org.elasticsearch.xpack.core.rollup.job.RollupJob; import org.elasticsearch.xpack.core.rollup.job.RollupJobStatus; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml index f3fa8114ddbd0..759ddbad2b463 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml @@ -210,3 +210,4 @@ setup: job_state: "stopped" upgraded_doc_id: true + From 95022affa51147c70a26e556d478bdac7c8e7c75 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 29 Aug 2018 15:56:13 -0400 Subject: [PATCH 05/52] Apply settings filter to get cluster settings API (#33247) Some settings have filters applied to them and we use this in logs and the get nodes info API. For consistency, we should apply this in the get cluster settings API too. --- .../cluster/RestClusterGetSettingsAction.java | 22 +++--- .../RestClusterGetSettingsActionTests.java | 70 +++++++++++++++++++ 2 files changed, 84 insertions(+), 8 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsActionTests.java diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java index b452b62eb5e95..746bb643bf62d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java @@ -87,13 +87,19 @@ public boolean canTripCircuitBreaker() { private XContentBuilder renderResponse(ClusterState state, boolean renderDefaults, XContentBuilder builder, ToXContent.Params params) throws IOException { - return - new ClusterGetSettingsResponse( - state.metaData().persistentSettings(), - state.metaData().transientSettings(), - renderDefaults ? 
- settingsFilter.filter(clusterSettings.diff(state.metaData().settings(), this.settings)) : - Settings.EMPTY - ).toXContent(builder, params); + return response(state, renderDefaults, settingsFilter, clusterSettings, settings).toXContent(builder, params); } + + static ClusterGetSettingsResponse response( + final ClusterState state, + final boolean renderDefaults, + final SettingsFilter settingsFilter, + final ClusterSettings clusterSettings, + final Settings settings) { + return new ClusterGetSettingsResponse( + settingsFilter.filter(state.metaData().persistentSettings()), + settingsFilter.filter(state.metaData().transientSettings()), + renderDefaults ? settingsFilter.filter(clusterSettings.diff(state.metaData().settings(), settings)) : Settings.EMPTY); + } + } diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsActionTests.java new file mode 100644 index 0000000000000..29b19739e7587 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsActionTests.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.rest.action.admin.cluster; + +import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsResponse; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.test.ESTestCase; + +import java.util.Collections; +import java.util.Set; +import java.util.function.BiConsumer; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + + +public class RestClusterGetSettingsActionTests extends ESTestCase { + + public void testFilterPersistentSettings() { + runTestFilterSettingsTest(MetaData.Builder::persistentSettings, ClusterGetSettingsResponse::getPersistentSettings); + } + + public void testFilterTransientSettings() { + runTestFilterSettingsTest(MetaData.Builder::transientSettings, ClusterGetSettingsResponse::getTransientSettings); + } + + private void runTestFilterSettingsTest( + final BiConsumer md, final Function s) { + final MetaData.Builder mdBuilder = new MetaData.Builder(); + final Settings settings = Settings.builder().put("foo.filtered", "bar").put("foo.non_filtered", "baz").build(); + md.accept(mdBuilder, settings); + final ClusterState.Builder builder = new ClusterState.Builder(ClusterState.EMPTY_STATE).metaData(mdBuilder); + final SettingsFilter filter = new SettingsFilter(Settings.EMPTY, Collections.singleton("foo.filtered")); + final Setting.Property[] properties = {Setting.Property.Dynamic, Setting.Property.Filtered, Setting.Property.NodeScope}; + final Set> settingsSet = Stream.concat( + ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.stream(), + Stream.concat( + Stream.of(Setting.simpleString("foo.filtered", properties)), + Stream.of(Setting.simpleString("foo.non_filtered", properties)))) + .collect(Collectors.toSet()); + final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, settingsSet); + final ClusterGetSettingsResponse response = + RestClusterGetSettingsAction.response(builder.build(), randomBoolean(), filter, clusterSettings, Settings.EMPTY); + assertFalse(s.apply(response).hasValue("foo.filtered")); + assertTrue(s.apply(response).hasValue("foo.non_filtered")); + } + +} From 7d4895dc5cdc5b11a946016d42304fbbe8ce3109 Mon Sep 17 00:00:00 2001 From: Mark Tozzi Date: Wed, 29 Aug 2018 16:04:35 -0400 Subject: [PATCH 06/52] Added deprecation warning for rescore in scroll queries (#33070) This adds a deprecation warning for using rescore on scroll queries in 6.x. As per #31775 we will not be supporting this going forward. See also #32918 which implements the validation error for 7.0 --- docs/reference/migration/migrate_6_5.asciidoc | 11 +++++++++++ .../elasticsearch/action/search/SearchRequest.java | 4 ++++ .../elasticsearch/search/SearchRequestTests.java | 13 +++++++++++++ 3 files changed, 28 insertions(+) diff --git a/docs/reference/migration/migrate_6_5.asciidoc b/docs/reference/migration/migrate_6_5.asciidoc index a6b22cf38d356..432fe999982c6 100644 --- a/docs/reference/migration/migrate_6_5.asciidoc +++ b/docs/reference/migration/migrate_6_5.asciidoc @@ -5,6 +5,7 @@ This section discusses the changes that you need to be aware of when migrating your application to Elasticsearch 6.5. * <> +* <> * <> See also <> and <>. 
@@ -25,6 +26,16 @@ will not change the logging configuration files though. You should make this change before 7.0 because in 7.0 Elasticsearch will no longer automatically add the node name to the logging configuration if it isn't already present. +[[breaking_65_search_changes]] +=== Search changes + +==== Scroll + +Using `rescore` with a scroll query now raises a deprecation warning and +ignores the parameter. In earlier 6.x releases, rescore on scroll queries was +silently ignored. In 7.0 and later, we will return a `400 - Bad Request` with +a validation error. + [[breaking_65_sql_changes]] === SQL plugin changes diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 05b06e65dea65..105d5d3aa7b08 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -137,6 +137,10 @@ public ActionRequestValidationException validate() { if (source != null && source.size() == 0 && scroll != null) { validationException = addValidationError("[size] cannot be [0] in a scroll context", validationException); } + if (source != null && source.rescores() != null && source.rescores().isEmpty() == false && scroll != null) { + DEPRECATION_LOGGER.deprecated("Using [rescore] for a scroll query is deprecated and will be ignored. From 7.0 on will " + + "return a 400 error"); + } return validationException; } diff --git a/server/src/test/java/org/elasticsearch/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/search/SearchRequestTests.java index eb2018f40c0a9..6fc0816168688 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchRequestTests.java @@ -28,7 +28,9 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.ArrayUtils; +import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.rescore.QueryRescorerBuilder; import java.io.IOException; import java.util.ArrayList; @@ -124,6 +126,17 @@ public void testValidate() throws IOException { assertEquals(1, validationErrors.validationErrors().size()); assertEquals("[size] cannot be [0] in a scroll context", validationErrors.validationErrors().get(0)); } + { + // Rescore is deprecated on scroll requests + SearchRequest searchRequest = createSearchRequest().source(new SearchSourceBuilder()); + searchRequest.source().addRescorer(new QueryRescorerBuilder(QueryBuilders.matchAllQuery())); + searchRequest.requestCache(false); + searchRequest.scroll(new TimeValue(1000)); + ActionRequestValidationException validationErrors = searchRequest.validate(); + assertNull(validationErrors); + assertWarnings("Using [rescore] for a scroll query is deprecated and will be ignored. From 7.0 on will return a 400 error"); + } + } public void testEqualsAndHashcode() throws IOException { From a29af74dfda19350136d9b3c6b09b0380a38e246 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Wed, 29 Aug 2018 17:10:00 -0400 Subject: [PATCH 07/52] [Rollup] Only allow aggregating on multiples of configured interval (#32052) We need to limit the search request aggregations to whole multiples of the configured interval for both histogram and date_histogram. 
Otherwise, agg buckets won't overlap with the rolled up buckets and the results will be incorrect. For histogram, the validation is very simple: request must be >= the config, and modulo evenly. Dates are more tricky. - If both request and config are fixed dates, we can convert to millis and treat them just like the histo - If both are calendar, we make sure the request is >= the config with a static lookup map that ranks the calendar values relatively. All calendar units are "singles", so they are evenly divisible already - We disallow any other combination (one fixed, one calendar, etc) --- x-pack/docs/build.gradle | 3 +- .../docs/en/rest-api/rollup/put-job.asciidoc | 2 + .../rollup/rollup-job-config.asciidoc | 50 +++++- .../en/rollup/rollup-getting-started.asciidoc | 124 +++++++------- .../rollup/rollup-search-limitations.asciidoc | 22 ++- .../rollup/RollupJobIdentifierUtils.java | 101 +++++++++-- .../rollup/RollupJobIdentifierUtilTests.java | 160 ++++++++++++++++++ .../elasticsearch/multi_node/RollupIT.java | 2 +- 8 files changed, 380 insertions(+), 84 deletions(-) diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index 7c867082257e2..4ea2ec3c2ef01 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -686,9 +686,8 @@ setups['sensor_prefab_data'] = ''' page_size: 1000 groups: date_histogram: - delay: "7d" field: "timestamp" - interval: "1h" + interval: "7d" time_zone: "UTC" terms: fields: diff --git a/x-pack/docs/en/rest-api/rollup/put-job.asciidoc b/x-pack/docs/en/rest-api/rollup/put-job.asciidoc index 1449acadc636d..27889d985b8c8 100644 --- a/x-pack/docs/en/rest-api/rollup/put-job.asciidoc +++ b/x-pack/docs/en/rest-api/rollup/put-job.asciidoc @@ -43,6 +43,8 @@ started with the <>. `metrics`:: (object) Defines the metrics that should be collected for each grouping tuple. See <>. +For more details about the job configuration, see <>. + ==== Authorization You must have `manage` or `manage_rollup` cluster privileges to use this API. diff --git a/x-pack/docs/en/rest-api/rollup/rollup-job-config.asciidoc b/x-pack/docs/en/rest-api/rollup/rollup-job-config.asciidoc index 2ba92b6b59ea6..f937f28601a2e 100644 --- a/x-pack/docs/en/rest-api/rollup/rollup-job-config.asciidoc +++ b/x-pack/docs/en/rest-api/rollup/rollup-job-config.asciidoc @@ -23,7 +23,7 @@ PUT _xpack/rollup/job/sensor "groups" : { "date_histogram": { "field": "timestamp", - "interval": "1h", + "interval": "60m", "delay": "7d" }, "terms": { @@ -99,7 +99,7 @@ fields will then be available later for aggregating into buckets. For example, "groups" : { "date_histogram": { "field": "timestamp", - "interval": "1h", + "interval": "60m", "delay": "7d" }, "terms": { @@ -133,9 +133,9 @@ The `date_histogram` group has several parameters: The date field that is to be rolled up. `interval` (required):: - The interval of time buckets to be generated when rolling up. E.g. `"1h"` will produce hourly rollups. This follows standard time formatting - syntax as used elsewhere in Elasticsearch. The `interval` defines the _minimum_ interval that can be aggregated only. If hourly (`"1h"`) - intervals are configured, <> can execute aggregations with 1hr or greater (weekly, monthly, etc) intervals. + The interval of time buckets to be generated when rolling up. E.g. `"60m"` will produce 60 minute (hourly) rollups. This follows standard time formatting + syntax as used elsewhere in Elasticsearch. The `interval` defines the _minimum_ interval that can be aggregated only. 
If hourly (`"60m"`) + intervals are configured, <> can execute aggregations with 60m or greater (weekly, monthly, etc) intervals. So define the interval as the smallest unit that you wish to later query. Note: smaller, more granular intervals take up proportionally more space. @@ -154,6 +154,46 @@ The `date_histogram` group has several parameters: to be stored with a specific timezone. By default, rollup documents are stored in `UTC`, but this can be changed with the `time_zone` parameter. +.Calendar vs Fixed time intervals +********************************** +Elasticsearch understands both "calendar" and "fixed" time intervals. Fixed time intervals are fairly easy to understand; +`"60s"` means sixty seconds. But what does `"1M` mean? One month of time depends on which month we are talking about, +some months are longer or shorter than others. This is an example of "calendar" time, and the duration of that unit +depends on context. Calendar units are also affected by leap-seconds, leap-years, etc. + +This is important because the buckets generated by Rollup will be in either calendar or fixed intervals, and will limit +how you can query them later (see <>. + +We recommend sticking with "fixed" time intervals, since they are easier to understand and are more flexible at query +time. It will introduce some drift in your data during leap-events, and you will have to think about months in a fixed +quantity (30 days) instead of the actual calendar length... but it is often easier than dealing with calendar units +at query time. + +Multiples of units are always "fixed" (e.g. `"2h"` is always the fixed quantity `7200` seconds. Single units can be +fixed or calendar depending on the unit: + +[options="header"] +|======= +|Unit |Calendar |Fixed +|millisecond |NA |`1ms`, `10ms`, etc +|second |NA |`1s`, `10s`, etc +|minute |`1m` |`2m`, `10m`, etc +|hour |`1h` |`2h`, `10h`, etc +|day |`1d` |`2d`, `10d`, etc +|week |`1w` |NA +|month |`1M` |NA +|quarter |`1q` |NA +|year |`1y` |NA +|======= + +For some units where there are both fixed and calendar, you may need to express the quantity in terms of the next +smaller unit. For example, if you want a fixed day (not a calendar day), you should specify `24h` instead of `1d`. +Similarly, if you want fixed hours, specify `60m` instead of `1h`. This is because the single quantity entails +calendar time, and limits you to querying by calendar time in the future. + + +********************************** + ===== Terms The `terms` group can be used on `keyword` or numeric fields, to allow bucketing via the `terms` aggregation at a later point. The `terms` diff --git a/x-pack/docs/en/rollup/rollup-getting-started.asciidoc b/x-pack/docs/en/rollup/rollup-getting-started.asciidoc index 24f68dddd8101..b6c913d7d34ac 100644 --- a/x-pack/docs/en/rollup/rollup-getting-started.asciidoc +++ b/x-pack/docs/en/rollup/rollup-getting-started.asciidoc @@ -37,8 +37,7 @@ PUT _xpack/rollup/job/sensor "groups" : { "date_histogram": { "field": "timestamp", - "interval": "1h", - "delay": "7d" + "interval": "60m" }, "terms": { "fields": ["node"] @@ -66,7 +65,7 @@ The `cron` parameter controls when and how often the job activates. When a roll from where it left off after the last activation. So if you configure the cron to run every 30 seconds, the job will process the last 30 seconds worth of data that was indexed into the `sensor-*` indices. -If instead the cron was configured to run once a day at midnight, the job would process the last 24hours worth of data. 
The choice is largely +If instead the cron was configured to run once a day at midnight, the job would process the last 24 hours worth of data. The choice is largely preference, based on how "realtime" you want the rollups, and if you wish to process continuously or move it to off-peak hours. Next, we define a set of `groups` and `metrics`. The metrics are fairly straightforward: we want to save the min/max/sum of the `temperature` @@ -79,7 +78,7 @@ It also allows us to run terms aggregations on the `node` field. .Date histogram interval vs cron schedule ********************************** You'll note that the job's cron is configured to run every 30 seconds, but the date_histogram is configured to -rollup at hourly intervals. How do these relate? +rollup at 60 minute intervals. How do these relate? The date_histogram controls the granularity of the saved data. Data will be rolled up into hourly intervals, and you will be unable to query with finer granularity. The cron simply controls when the process looks for new data to rollup. Every 30 seconds it will see @@ -223,70 +222,71 @@ Which returns a corresponding response: [source,js] ---- { - "took" : 93, - "timed_out" : false, - "terminated_early" : false, - "_shards" : ... , - "hits" : { - "total" : 0, - "max_score" : 0.0, - "hits" : [ ] - }, - "aggregations" : { - "timeline" : { - "meta" : { }, - "buckets" : [ - { - "key_as_string" : "2018-01-18T00:00:00.000Z", - "key" : 1516233600000, - "doc_count" : 6, - "nodes" : { - "doc_count_error_upper_bound" : 0, - "sum_other_doc_count" : 0, - "buckets" : [ - { - "key" : "a", - "doc_count" : 2, - "max_temperature" : { - "value" : 202.0 - }, - "avg_voltage" : { - "value" : 5.1499998569488525 - } - }, - { - "key" : "b", - "doc_count" : 2, - "max_temperature" : { - "value" : 201.0 - }, - "avg_voltage" : { - "value" : 5.700000047683716 - } - }, - { - "key" : "c", - "doc_count" : 2, - "max_temperature" : { - "value" : 202.0 - }, - "avg_voltage" : { - "value" : 4.099999904632568 - } - } - ] - } - } - ] - } - } + "took" : 93, + "timed_out" : false, + "terminated_early" : false, + "_shards" : ... , + "hits" : { + "total" : 0, + "max_score" : 0.0, + "hits" : [ ] + }, + "aggregations" : { + "timeline" : { + "meta" : { }, + "buckets" : [ + { + "key_as_string" : "2018-01-18T00:00:00.000Z", + "key" : 1516233600000, + "doc_count" : 6, + "nodes" : { + "doc_count_error_upper_bound" : 0, + "sum_other_doc_count" : 0, + "buckets" : [ + { + "key" : "a", + "doc_count" : 2, + "max_temperature" : { + "value" : 202.0 + }, + "avg_voltage" : { + "value" : 5.1499998569488525 + } + }, + { + "key" : "b", + "doc_count" : 2, + "max_temperature" : { + "value" : 201.0 + }, + "avg_voltage" : { + "value" : 5.700000047683716 + } + }, + { + "key" : "c", + "doc_count" : 2, + "max_temperature" : { + "value" : 202.0 + }, + "avg_voltage" : { + "value" : 4.099999904632568 + } + } + ] + } + } + ] + } + } } + ---- // TESTRESPONSE[s/"took" : 93/"took" : $body.$_path/] // TESTRESPONSE[s/"_shards" : \.\.\. /"_shards" : $body.$_path/] In addition to being more complicated (date histogram and a terms aggregation, plus an additional average metric), you'll notice -the date_histogram uses a `7d` interval instead of `1h`. +the date_histogram uses a `7d` interval instead of `60m`. 
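To make the "whole multiple" rule concrete, here is a small sketch in the style of the `RollupJobIdentifierUtilTests` added by this change. The test method name is illustrative only, and it assumes the caller lives alongside those tests, since the `RollupJobIdentifierUtils.validateFixedInterval` helper shown earlier is package-private:

[source,java]
----
// A request interval is usable only if it is greater than or equal to the configured
// interval and is a whole multiple of it.
public void testSixtyMinuteConfig() {
    DateHistogramInterval configured = new DateHistogramInterval("60m");

    // 180m is three times 60m, so a 180m request can be serviced by this job.
    assertTrue(RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("180m"), configured));

    // 7d is 168 whole hours, so the 7d aggregation shown above also works against a 60m job.
    assertTrue(RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("7d"), configured));

    // 90m is larger than 60m but not a whole multiple of it, so it is rejected.
    assertFalse(RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("90m"), configured));
}
----

At search time this same check decides whether a rollup job can service a requested `date_histogram` interval, which is why the `7d` aggregation above is compatible with the job configured at a `60m` interval.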
[float] === Conclusion diff --git a/x-pack/docs/en/rollup/rollup-search-limitations.asciidoc b/x-pack/docs/en/rollup/rollup-search-limitations.asciidoc index 57ba23eebccbe..99f19a179ede7 100644 --- a/x-pack/docs/en/rollup/rollup-search-limitations.asciidoc +++ b/x-pack/docs/en/rollup/rollup-search-limitations.asciidoc @@ -80,9 +80,25 @@ The response will tell you that the field and aggregation were not possible, bec [float] === Interval Granularity -Rollups are stored at a certain granularity, as defined by the `date_histogram` group in the configuration. If data is rolled up at hourly -intervals, the <> API can aggregate on any time interval hourly or greater. Intervals that are less than an hour will throw -an exception, since the data simply doesn't exist for finer granularities. +Rollups are stored at a certain granularity, as defined by the `date_histogram` group in the configuration. This means you +can only search/aggregate the rollup data with an interval that is greater-than or equal to the configured rollup interval. + +For example, if data is rolled up at hourly intervals, the <> API can aggregate on any time interval +hourly or greater. Intervals that are less than an hour will throw an exception, since the data simply doesn't +exist for finer granularities. + +[[rollup-search-limitations-intervals]] +.Requests must be multiples of the config +********************************** +Perhaps not immediately apparent, but the interval specified in an aggregation request must be a whole +multiple of the configured interval. If the job was configured to rollup on `3d` intervals, you can only +query and aggregate on multiples of three (`3d`, `6d`, `9d`, etc). + +A non-multiple wouldn't work, since the rolled up data wouldn't cleanly "overlap" with the buckets generated +by the aggregation, leading to incorrect results. + +For that reason, an error is thrown if a whole multiple of the configured interval isn't found. +********************************** Because the RollupSearch endpoint can "upsample" intervals, there is no need to configure jobs with multiple intervals (hourly, daily, etc). 
It's recommended to just configure a single job with the smallest granularity that is needed, and allow the search endpoint to upsample diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java index d1706fd708e93..8537f2b6a38b4 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java @@ -8,6 +8,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; @@ -17,7 +18,9 @@ import org.joda.time.DateTimeZone; import java.util.ArrayList; +import java.util.Collections; import java.util.Comparator; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -32,6 +35,29 @@ public class RollupJobIdentifierUtils { private static final Comparator COMPARATOR = RollupJobIdentifierUtils.getComparator(); + public static final Map CALENDAR_ORDERING; + + static { + Map dateFieldUnits = new HashMap<>(16); + dateFieldUnits.put("year", 8); + dateFieldUnits.put("1y", 8); + dateFieldUnits.put("quarter", 7); + dateFieldUnits.put("1q", 7); + dateFieldUnits.put("month", 6); + dateFieldUnits.put("1M", 6); + dateFieldUnits.put("week", 5); + dateFieldUnits.put("1w", 5); + dateFieldUnits.put("day", 4); + dateFieldUnits.put("1d", 4); + dateFieldUnits.put("hour", 3); + dateFieldUnits.put("1h", 3); + dateFieldUnits.put("minute", 2); + dateFieldUnits.put("1m", 2); + dateFieldUnits.put("second", 1); + dateFieldUnits.put("1s", 1); + CALENDAR_ORDERING = Collections.unmodifiableMap(dateFieldUnits); + } + /** * Given the aggregation tree and a list of available job capabilities, this method will return a set * of the "best" jobs that should be searched. @@ -93,8 +119,9 @@ private static void checkDateHisto(DateHistogramAggregationBuilder source, List< if (fieldCaps != null) { for (Map agg : fieldCaps.getAggs()) { if (agg.get(RollupField.AGG).equals(DateHistogramAggregationBuilder.NAME)) { - TimeValue interval = TimeValue.parseTimeValue((String)agg.get(RollupField.INTERVAL), "date_histogram.interval"); - String thisTimezone = (String) agg.get(DateHistogramGroupConfig.TIME_ZONE); + DateHistogramInterval interval = new DateHistogramInterval((String)agg.get(RollupField.INTERVAL)); + + String thisTimezone = (String)agg.get(DateHistogramGroupConfig.TIME_ZONE); String sourceTimeZone = source.timeZone() == null ? 
DateTimeZone.UTC.toString() : source.timeZone().toString(); // Ensure we are working on the same timezone @@ -102,17 +129,20 @@ private static void checkDateHisto(DateHistogramAggregationBuilder source, List< continue; } if (source.dateHistogramInterval() != null) { - TimeValue sourceInterval = TimeValue.parseTimeValue(source.dateHistogramInterval().toString(), - "source.date_histogram.interval"); - //TODO should be divisor of interval - if (interval.compareTo(sourceInterval) <= 0) { + // Check if both are calendar and validate if they are. + // If not, check if both are fixed and validate + if (validateCalendarInterval(source.dateHistogramInterval(), interval)) { + localCaps.add(cap); + } else if (validateFixedInterval(source.dateHistogramInterval(), interval)) { localCaps.add(cap); } } else { - if (interval.getMillis() <= source.interval()) { + // check if config is fixed and validate if it is + if (validateFixedInterval(source.interval(), interval)) { localCaps.add(cap); } } + // not a candidate if we get here break; } } @@ -133,6 +163,55 @@ private static void checkDateHisto(DateHistogramAggregationBuilder source, List< } } + private static boolean isCalendarInterval(DateHistogramInterval interval) { + return DateHistogramAggregationBuilder.DATE_FIELD_UNITS.containsKey(interval.toString()); + } + + static boolean validateCalendarInterval(DateHistogramInterval requestInterval, + DateHistogramInterval configInterval) { + // Both must be calendar intervals + if (isCalendarInterval(requestInterval) == false || isCalendarInterval(configInterval) == false) { + return false; + } + + // The request must be gte the config. The CALENDAR_ORDERING map values are integers representing + // relative orders between the calendar units + int requestOrder = CALENDAR_ORDERING.getOrDefault(requestInterval.toString(), Integer.MAX_VALUE); + int configOrder = CALENDAR_ORDERING.getOrDefault(configInterval.toString(), Integer.MAX_VALUE); + + // All calendar units are multiples naturally, so we just care about gte + return requestOrder >= configOrder; + } + + static boolean validateFixedInterval(DateHistogramInterval requestInterval, + DateHistogramInterval configInterval) { + // Neither can be calendar intervals + if (isCalendarInterval(requestInterval) || isCalendarInterval(configInterval)) { + return false; + } + + // Both are fixed, good to conver to millis now + long configIntervalMillis = TimeValue.parseTimeValue(configInterval.toString(), + "date_histo.config.interval").getMillis(); + long requestIntervalMillis = TimeValue.parseTimeValue(requestInterval.toString(), + "date_histo.request.interval").getMillis(); + + // Must be a multiple and gte the config + return requestIntervalMillis >= configIntervalMillis && requestIntervalMillis % configIntervalMillis == 0; + } + + static boolean validateFixedInterval(long requestInterval, DateHistogramInterval configInterval) { + // config must not be a calendar interval + if (isCalendarInterval(configInterval)) { + return false; + } + long configIntervalMillis = TimeValue.parseTimeValue(configInterval.toString(), + "date_histo.config.interval").getMillis(); + + // Must be a multiple and gte the config + return requestInterval >= configIntervalMillis && requestInterval % configIntervalMillis == 0; + } + /** * Find the set of histo's with the largest interval */ @@ -144,8 +223,8 @@ private static void checkHisto(HistogramAggregationBuilder source, List agg : fieldCaps.getAggs()) { if (agg.get(RollupField.AGG).equals(HistogramAggregationBuilder.NAME)) { Long interval 
= (long)agg.get(RollupField.INTERVAL); - // TODO should be divisor of interval - if (interval <= source.interval()) { + // query interval must be gte the configured interval, and a whole multiple + if (interval <= source.interval() && source.interval() % interval == 0) { localCaps.add(cap); } break; @@ -155,8 +234,8 @@ private static void checkHisto(HistogramAggregationBuilder source, List caps = singletonSet(cap); + + DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") + .dateHistogramInterval(new DateHistogramInterval("1000s")); + + Set bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps); + assertThat(bestCaps.size(), equalTo(1)); + } + + public void testBiggerButCompatibleFixedMillisInterval() { + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("100ms"))); + final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); + RollupJobCaps cap = new RollupJobCaps(job); + Set caps = singletonSet(cap); + + DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") + .interval(1000); + + Set bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps); + assertThat(bestCaps.size(), equalTo(1)); + } + public void testIncompatibleInterval() { final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"))); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); @@ -75,6 +101,20 @@ public void testIncompatibleInterval() { "[foo] which also satisfies all requirements of query.")); } + public void testIncompatibleFixedCalendarInterval() { + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("5d"))); + final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); + RollupJobCaps cap = new RollupJobCaps(job); + Set caps = singletonSet(cap); + + DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") + .dateHistogramInterval(new DateHistogramInterval("day")); + + RuntimeException e = expectThrows(RuntimeException.class, () -> RollupJobIdentifierUtils.findBestJobs(builder, caps)); + assertThat(e.getMessage(), equalTo("There is not a rollup job that has a [date_histogram] agg on field " + + "[foo] which also satisfies all requirements of query.")); + } + public void testBadTimeZone() { final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"), null, "EST")); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); @@ -385,6 +425,27 @@ public void testNoMatchingHistoInterval() { "[bar] which also satisfies all requirements of query.")); } + public void testHistoIntervalNotMultiple() { + HistogramAggregationBuilder histo = new HistogramAggregationBuilder("test_histo"); + histo.interval(10) // <--- interval is not a multiple of 3 + .field("bar") + .subAggregation(new MaxAggregationBuilder("the_max").field("max_field")) + .subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field")); + + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", + new DateHistogramInterval("1d"), null, "UTC"), + new HistogramGroupConfig(3L, "bar"), + null); + final RollupJobConfig job = new 
RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); + RollupJobCaps cap = new RollupJobCaps(job); + Set caps = singletonSet(cap); + + Exception e = expectThrows(RuntimeException.class, + () -> RollupJobIdentifierUtils.findBestJobs(histo, caps)); + assertThat(e.getMessage(), equalTo("There is not a rollup job that has a [histogram] agg on field " + + "[bar] which also satisfies all requirements of query.")); + } + public void testMissingMetric() { int i = ESTestCase.randomIntBetween(0, 3); @@ -417,6 +478,105 @@ public void testMissingMetric() { } + public void testValidateFixedInterval() { + boolean valid = RollupJobIdentifierUtils.validateFixedInterval(100, new DateHistogramInterval("100ms")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(200, new DateHistogramInterval("100ms")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(1000, new DateHistogramInterval("200ms")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(5*60*1000, new DateHistogramInterval("5m")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(10*5*60*1000, new DateHistogramInterval("5m")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(100, new DateHistogramInterval("500ms")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(100, new DateHistogramInterval("5m")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(100, new DateHistogramInterval("minute")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(100, new DateHistogramInterval("second")); + assertFalse(valid); + + // ----------- + // Same tests, with both being DateHistoIntervals + // ----------- + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("100ms"), + new DateHistogramInterval("100ms")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("200ms"), + new DateHistogramInterval("100ms")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("1000ms"), + new DateHistogramInterval("200ms")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("5m"), + new DateHistogramInterval("5m")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("20m"), + new DateHistogramInterval("5m")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("100ms"), + new DateHistogramInterval("500ms")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("100ms"), + new DateHistogramInterval("5m")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("100ms"), + new DateHistogramInterval("minute")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("100ms"), + new DateHistogramInterval("second")); + assertFalse(valid); + } + + public void testValidateCalendarInterval() { + boolean valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("second"), + new DateHistogramInterval("second")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("minute"), + new 
DateHistogramInterval("second")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("month"), + new DateHistogramInterval("day")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("1d"), + new DateHistogramInterval("1s")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("second"), + new DateHistogramInterval("minute")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("second"), + new DateHistogramInterval("1m")); + assertFalse(valid); + + // Fails because both are actually fixed + valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("100ms"), + new DateHistogramInterval("100ms")); + assertFalse(valid); + } + private Set singletonSet(RollupJobCaps cap) { Set caps = new HashSet<>(); caps.add(cap); diff --git a/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java b/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java index 43ad4dc0a45a2..fb9c665b2bf1c 100644 --- a/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java +++ b/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java @@ -173,7 +173,7 @@ public void testBigRollup() throws Exception { " \"date_histo\": {\n" + " \"date_histogram\": {\n" + " \"field\": \"timestamp\",\n" + - " \"interval\": \"1h\",\n" + + " \"interval\": \"60m\",\n" + " \"format\": \"date_time\"\n" + " },\n" + " \"aggs\": {\n" + From 561005f4f5a0a3c2a20cc890fbe26c228397b706 Mon Sep 17 00:00:00 2001 From: Matt Weber Date: Wed, 29 Aug 2018 12:19:58 -0700 Subject: [PATCH 08/52] Fix classpath security checks for external tests. (#33066) This commit checks that when we manually add a class to the codebase map, that it does in-fact not exist on the classpath in a jar. This will only be true if we are using the test framework externally such as when a user develops a plugin. --- .../org/elasticsearch/bootstrap/BootstrapForTesting.java | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java index 2692a9521a9df..e041289435118 100644 --- a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java +++ b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java @@ -173,9 +173,12 @@ public boolean implies(ProtectionDomain domain, Permission permission) { /** Add the codebase url of the given classname to the codebases map, if the class exists. 
*/ private static void addClassCodebase(Map codebases, String name, String classname) { try { - Class clazz = BootstrapForTesting.class.getClassLoader().loadClass(classname); - if (codebases.put(name, clazz.getProtectionDomain().getCodeSource().getLocation()) != null) { - throw new IllegalStateException("Already added " + name + " codebase for testing"); + Class clazz = BootstrapForTesting.class.getClassLoader().loadClass(classname); + URL location = clazz.getProtectionDomain().getCodeSource().getLocation(); + if (location.toString().endsWith(".jar") == false) { + if (codebases.put(name, location) != null) { + throw new IllegalStateException("Already added " + name + " codebase for testing"); + } } } catch (ClassNotFoundException e) { // no class, fall through to not add. this can happen for any tests that do not include From 0dec222a7ff7951d7dedd9d0d0f900aa27409679 Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Wed, 29 Aug 2018 22:42:08 +0200 Subject: [PATCH 09/52] Watcher: Reload properly on remote shard change (#33167) When a node dies that carries a watcher shard or a shard is relocated to another node, then watcher needs not only trigger a reload on the node where the shard relocation happened, but also on other nodes where copies of this shard, as different watches may need to be loaded. This commit takes the change of remote nodes into account by not only storing the local shard allocation ids in the WatcherLifeCycleService, but storing a list of ShardRoutings based on the local active shards. This also fixes some tests, which had a wrong assumption. Using `TestShardRouting.newShardRouting` in our tests for cluster state creation led to the issue of always creating new allocation ids which implicitely lead to a reload. --- .../watcher/WatcherLifeCycleService.java | 29 +++--- .../watcher/WatcherLifeCycleServiceTests.java | 88 +++++++++++++++++-- 2 files changed, 97 insertions(+), 20 deletions(-) diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java index 7451208636897..127425308b615 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java @@ -12,7 +12,6 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; @@ -23,13 +22,16 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.GatewayService; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.xpack.core.watcher.WatcherMetaData; import org.elasticsearch.xpack.core.watcher.WatcherState; import org.elasticsearch.xpack.core.watcher.watch.Watch; import org.elasticsearch.xpack.watcher.watch.WatchStoreUtils; import java.util.Collections; +import java.util.Comparator; import java.util.List; +import java.util.Set; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; @@ -46,7 +48,7 @@ public class WatcherLifeCycleService extends AbstractComponent 
implements Cluste Setting.boolSetting("xpack.watcher.require_manual_start", false, Property.NodeScope); private final AtomicReference state = new AtomicReference<>(WatcherState.STARTED); - private final AtomicReference> previousAllocationIds = new AtomicReference<>(Collections.emptyList()); + private final AtomicReference> previousShardRoutings = new AtomicReference<>(Collections.emptyList()); private final boolean requireManualStart; private volatile boolean shutDown = false; // indicates that the node has been shutdown and we should never start watcher after this. private volatile WatcherService watcherService; @@ -147,15 +149,20 @@ public void clusterChanged(ClusterChangedEvent event) { return; } - List currentAllocationIds = localShards.stream() - .map(ShardRouting::allocationId) - .map(AllocationId::getId) - .sorted() + // also check if non local shards have changed, as loosing a shard on a + // remote node or adding a replica on a remote node needs to trigger a reload too + Set localShardIds = localShards.stream().map(ShardRouting::shardId).collect(Collectors.toSet()); + List allShards = event.state().routingTable().index(watchIndex).shardsWithState(STARTED); + allShards.addAll(event.state().routingTable().index(watchIndex).shardsWithState(RELOCATING)); + List localAffectedShardRoutings = allShards.stream() + .filter(shardRouting -> localShardIds.contains(shardRouting.shardId())) + // shardrouting is not comparable, so we need some order mechanism + .sorted(Comparator.comparing(ShardRouting::hashCode)) .collect(Collectors.toList()); - if (previousAllocationIds.get().equals(currentAllocationIds) == false) { + if (previousShardRoutings.get().equals(localAffectedShardRoutings) == false) { if (watcherService.validate(event.state())) { - previousAllocationIds.set(Collections.unmodifiableList(currentAllocationIds)); + previousShardRoutings.set(localAffectedShardRoutings); if (state.get() == WatcherState.STARTED) { watcherService.reload(event.state(), "new local watcher shard allocation ids"); } else if (state.get() == WatcherState.STOPPED) { @@ -219,13 +226,13 @@ private boolean isWatcherStoppedManually(ClusterState state) { * @return true, if existing allocation ids were cleaned out, false otherwise */ private boolean clearAllocationIds() { - List previousIds = previousAllocationIds.getAndSet(Collections.emptyList()); + List previousIds = previousShardRoutings.getAndSet(Collections.emptyList()); return previousIds.equals(Collections.emptyList()) == false; } // for testing purposes only - List allocationIds() { - return previousAllocationIds.get(); + List shardRoutings() { + return previousShardRoutings.get(); } public WatcherState getState() { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java index d83cacfacdff6..38c453292fab7 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java @@ -255,9 +255,12 @@ public void testReplicaWasAddedOrRemoved() { .add(newNode("node_2")) .build(); + ShardRouting firstShardOnSecondNode = TestShardRouting.newShardRouting(shardId, "node_2", true, STARTED); + ShardRouting secondShardOnFirstNode = TestShardRouting.newShardRouting(secondShardId, "node_1", true, STARTED); + IndexRoutingTable previousWatchRoutingTable = 
IndexRoutingTable.builder(watchIndex) - .addShard(TestShardRouting.newShardRouting(secondShardId, "node_1", true, STARTED)) - .addShard(TestShardRouting.newShardRouting(shardId, "node_2", true, STARTED)) + .addShard(secondShardOnFirstNode) + .addShard(firstShardOnSecondNode) .build(); IndexMetaData indexMetaData = IndexMetaData.builder(Watch.INDEX) @@ -274,10 +277,19 @@ public void testReplicaWasAddedOrRemoved() { .metaData(MetaData.builder().put(indexMetaData, false)) .build(); + // add a replica in the local node + boolean addShardOnLocalNode = randomBoolean(); + final ShardRouting addedShardRouting; + if (addShardOnLocalNode) { + addedShardRouting = TestShardRouting.newShardRouting(shardId, "node_1", false, STARTED); + } else { + addedShardRouting = TestShardRouting.newShardRouting(secondShardId, "node_2", false, STARTED); + } + IndexRoutingTable currentWatchRoutingTable = IndexRoutingTable.builder(watchIndex) - .addShard(TestShardRouting.newShardRouting(shardId, "node_1", false, STARTED)) - .addShard(TestShardRouting.newShardRouting(secondShardId, "node_1", true, STARTED)) - .addShard(TestShardRouting.newShardRouting(shardId, "node_2", true, STARTED)) + .addShard(secondShardOnFirstNode) + .addShard(firstShardOnSecondNode) + .addShard(addedShardRouting) .build(); ClusterState stateWithReplicaAdded = ClusterState.builder(new ClusterName("my-cluster")) @@ -563,7 +575,67 @@ public void testDataNodeWithoutDataCanStart() { assertThat(lifeCycleService.getState(), is(WatcherState.STARTED)); } - private ClusterState startWatcher() { + // this emulates a node outage somewhere in the cluster that carried a watcher shard + // the number of shards remains the same, but we need to ensure that watcher properly reloads + // previously we only checked the local shard allocations, but we also need to check if shards in the cluster have changed + public void testWatcherReloadsOnNodeOutageWithWatcherShard() { + Index watchIndex = new Index(Watch.INDEX, "foo"); + ShardId shardId = new ShardId(watchIndex, 0); + String localNodeId = randomFrom("node_1", "node_2"); + String outageNodeId = localNodeId.equals("node_1") ? 
"node_2" : "node_1"; + DiscoveryNodes previousDiscoveryNodes = new DiscoveryNodes.Builder().masterNodeId(localNodeId).localNodeId(localNodeId) + .add(newNode(localNodeId)) + .add(newNode(outageNodeId)) + .build(); + + ShardRouting replicaShardRouting = TestShardRouting.newShardRouting(shardId, localNodeId, false, STARTED); + ShardRouting primartShardRouting = TestShardRouting.newShardRouting(shardId, outageNodeId, true, STARTED); + IndexRoutingTable previousWatchRoutingTable = IndexRoutingTable.builder(watchIndex) + .addShard(replicaShardRouting) + .addShard(primartShardRouting) + .build(); + + IndexMetaData indexMetaData = IndexMetaData.builder(Watch.INDEX) + .settings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.INDEX_FORMAT_SETTING.getKey(), 6) + ).build(); + + ClusterState previousState = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(previousDiscoveryNodes) + .routingTable(RoutingTable.builder().add(previousWatchRoutingTable).build()) + .metaData(MetaData.builder().put(indexMetaData, false)) + .build(); + + ShardRouting nowPrimaryShardRouting = replicaShardRouting.moveActiveReplicaToPrimary(); + IndexRoutingTable currentWatchRoutingTable = IndexRoutingTable.builder(watchIndex) + .addShard(nowPrimaryShardRouting) + .build(); + + DiscoveryNodes currentDiscoveryNodes = new DiscoveryNodes.Builder().masterNodeId(localNodeId).localNodeId(localNodeId) + .add(newNode(localNodeId)) + .build(); + + ClusterState currentState = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(currentDiscoveryNodes) + .routingTable(RoutingTable.builder().add(currentWatchRoutingTable).build()) + .metaData(MetaData.builder().put(indexMetaData, false)) + .build(); + + // initialize the previous state, so all the allocation ids are loaded + when(watcherService.validate(anyObject())).thenReturn(true); + lifeCycleService.clusterChanged(new ClusterChangedEvent("whatever", previousState, currentState)); + + reset(watcherService); + when(watcherService.validate(anyObject())).thenReturn(true); + ClusterChangedEvent event = new ClusterChangedEvent("whatever", currentState, previousState); + lifeCycleService.clusterChanged(event); + verify(watcherService).reload(eq(event.state()), anyString()); + } + + private void startWatcher() { Index index = new Index(Watch.INDEX, "uuid"); IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); indexRoutingTableBuilder.addShard( @@ -593,12 +665,10 @@ private ClusterState startWatcher() { lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", state, emptyState)); assertThat(lifeCycleService.getState(), is(WatcherState.STARTED)); verify(watcherService, times(1)).reload(eq(state), anyString()); - assertThat(lifeCycleService.allocationIds(), hasSize(1)); + assertThat(lifeCycleService.shardRoutings(), hasSize(1)); // reset the mock, the user has to mock everything themselves again reset(watcherService); - - return state; } private List randomIndexPatterns() { From 938a99a8ce91568da248b75cdd1c951dea027d1c Mon Sep 17 00:00:00 2001 From: Yogesh Gaikwad <902768+bizybot@users.noreply.github.com> Date: Thu, 30 Aug 2018 12:08:29 +1000 Subject: [PATCH 10/52] [Kerberos] Add unsupported languages for tests (#33253) Ran for all locales in system to find locales which caused problems in tests due to incorrect generalized time handling in simple kdc ldap server. 
Closes#33228 --- .../xpack/security/authc/kerberos/KerberosTestCase.java | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java index 2bd1bdf906ad8..f97afc1d52c2d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java @@ -74,6 +74,13 @@ public abstract class KerberosTestCase extends ESTestCase { unsupportedLocaleLanguages.add("ne"); unsupportedLocaleLanguages.add("dz"); unsupportedLocaleLanguages.add("mzn"); + unsupportedLocaleLanguages.add("mr"); + unsupportedLocaleLanguages.add("as"); + unsupportedLocaleLanguages.add("bn"); + unsupportedLocaleLanguages.add("lrc"); + unsupportedLocaleLanguages.add("my"); + unsupportedLocaleLanguages.add("ps"); + unsupportedLocaleLanguages.add("ur"); } @BeforeClass From 39244bb027d6d5d390b6ba665b76d6a212705309 Mon Sep 17 00:00:00 2001 From: Jay Modi Date: Wed, 29 Aug 2018 21:44:33 -0600 Subject: [PATCH 11/52] HLRC: add client side RefreshPolicy (#33209) With the switch to client side request and response objects, we need a client side version of RefreshPolicy. This change adds a client side version of RefreshPolicy along with a method to add it to the parameters of a request. The existing method to add WriteRequest.RefreshPolicy to the parameters of a request is now deprecated. --- .../client/RequestConverters.java | 15 ++++- .../client/security/RefreshPolicy.java | 59 +++++++++++++++++++ 2 files changed, 73 insertions(+), 1 deletion(-) create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/security/RefreshPolicy.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 131f9a14f383d..edb18b89d1c11 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -88,6 +88,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.client.security.RefreshPolicy; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; @@ -1352,11 +1353,16 @@ Params withRealtime(boolean realtime) { Params withRefresh(boolean refresh) { if (refresh) { - return withRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + return withRefreshPolicy(RefreshPolicy.IMMEDIATE); } return this; } + /** + * @deprecated If creating a new HLRC ReST API call, use {@link RefreshPolicy} + * instead of {@link WriteRequest.RefreshPolicy} from the server project + */ + @Deprecated Params withRefreshPolicy(WriteRequest.RefreshPolicy refreshPolicy) { if (refreshPolicy != WriteRequest.RefreshPolicy.NONE) { return putParam("refresh", refreshPolicy.getValue()); @@ -1364,6 +1370,13 @@ Params withRefreshPolicy(WriteRequest.RefreshPolicy refreshPolicy) { return this; } + Params withRefreshPolicy(RefreshPolicy refreshPolicy) { + if (refreshPolicy != RefreshPolicy.NONE) { + return putParam("refresh", 
refreshPolicy.getValue()); + } + return this; + } + Params withRetryOnConflict(int retryOnConflict) { if (retryOnConflict > 0) { return putParam("retry_on_conflict", String.valueOf(retryOnConflict)); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/RefreshPolicy.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/RefreshPolicy.java new file mode 100644 index 0000000000000..8b72f704edff4 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/RefreshPolicy.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +/** + * Enumeration of values that control the refresh policy for a request that + * supports specifying a refresh policy. + */ +public enum RefreshPolicy { + + /** + * Don't refresh after this request. The default. + */ + NONE("false"), + /** + * Force a refresh as part of this request. This refresh policy does not scale for high indexing or search throughput but is useful + * to present a consistent view to for indices with very low traffic. And it is wonderful for tests! + */ + IMMEDIATE("true"), + /** + * Leave this request open until a refresh has made the contents of this request visible to search. This refresh policy is + * compatible with high indexing and search throughput but it causes the request to wait to reply until a refresh occurs. + */ + WAIT_UNTIL("wait_for"); + + private final String value; + + RefreshPolicy(String value) { + this.value = value; + } + + public String getValue() { + return value; + } + + /** + * Get the default refresh policy, which is NONE + */ + public static RefreshPolicy getDefault() { + return RefreshPolicy.NONE; + } +} From 2e70890646945cd8994a72260db6442202fddfaa Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 30 Aug 2018 06:43:04 +0100 Subject: [PATCH 12/52] Move file-based discovery to core (#33241) Today we support a static list of seed hosts in core Elasticsearch, and allow a dynamic list of seed hosts to be provided via a file using the `discovery-file` plugin. In fact the ability to provide a dynamic list of seed hosts is increasingly useful, so this change moves this functionality to core Elasticsearch to avoid the need for a plugin. Furthermore, in order to start up nodes in integration tests we currently assign a known port to each node before startup, which unfortunately sometimes fails if another process grabs the selected port in the meantime. By moving the `discovery-file` functionality into the core product we can use it to avoid this race. This change also moves the expected path to the file from `$ES_PATH_CONF/discovery-file/unicast_hosts.txt` to `$ES_PATH_CONF/unicast_hosts.txt`. 
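In rough terms the file is now resolved as in the following self-contained sketch (illustrative class and method names only; the real implementation is the FileBasedUnicastHostsProvider added further down in this patch): the new location is preferred, the legacy location is still read but is deprecated, and an empty host list is used otherwise.

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

// Simplified sketch of the path resolution described above; names are
// illustrative, not the production code.
class UnicastHostsFileSketch {
    static List<String> readHosts(Path configDir) {
        Path preferred = configDir.resolve("unicast_hosts.txt");
        Path legacy = configDir.resolve("discovery-file").resolve("unicast_hosts.txt");
        if (Files.exists(preferred)) {
            return readLines(preferred);
        }
        if (Files.exists(legacy)) {
            // the real provider also emits a deprecation warning for this location
            return readLines(legacy);
        }
        return Collections.emptyList(); // the real provider logs a warning in this case
    }

    private static List<String> readLines(Path path) {
        try (Stream<String> lines = Files.lines(path)) {
            // lines starting with '#' are comments
            return lines.filter(line -> line.startsWith("#") == false).collect(Collectors.toList());
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    public static void main(String[] args) {
        System.out.println(readHosts(Paths.get(args.length > 0 ? args[0] : ".")));
    }
}
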
An example of this file is not included in distributions. For BWC purposes the plugin still exists, but does nothing more than create the example file in the old location, and issue a warning when it is used. We also continue to support the old location for the file, but warn about its deprecation. Relates #29244 Closes #33030 --- docs/plugins/discovery-file.asciidoc | 73 +----- docs/reference/modules/discovery/zen.asciidoc | 242 ++++++++++++------ .../file/FileBasedDiscoveryPlugin.java | 24 +- .../file/FileBasedUnicastHostsProvider.java | 83 ------ ...eBasedDiscoveryPluginDeprecationTests.java | 32 +++ .../discovery/DiscoveryModule.java | 5 +- .../zen/FileBasedUnicastHostsProvider.java | 92 +++++++ .../java/org/elasticsearch/node/Node.java | 2 +- .../discovery/DiscoveryModuleTests.java | 4 +- .../FileBasedUnicastHostsProviderTests.java | 103 +++++--- 10 files changed, 370 insertions(+), 290 deletions(-) delete mode 100644 plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java create mode 100644 plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPluginDeprecationTests.java create mode 100644 server/src/main/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProvider.java rename {plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file => server/src/test/java/org/elasticsearch/discovery/zen}/FileBasedUnicastHostsProviderTests.java (63%) diff --git a/docs/plugins/discovery-file.asciidoc b/docs/plugins/discovery-file.asciidoc index ad06cfc0cc5f1..4f2182da056a0 100644 --- a/docs/plugins/discovery-file.asciidoc +++ b/docs/plugins/discovery-file.asciidoc @@ -1,71 +1,14 @@ [[discovery-file]] === File-Based Discovery Plugin -The file-based discovery plugin uses a list of hosts/ports in a `unicast_hosts.txt` file -in the `config/discovery-file` directory for unicast discovery. +The functionality provided by the `discovery-file` plugin is now available in +Elasticsearch without requiring a plugin. This plugin still exists to ensure +backwards compatibility, but it will be removed in a future version. + +On installation, this plugin creates a file at +`$ES_PATH_CONF/discovery-file/unicast_hosts.txt` that comprises comments that +describe how to use it. It is preferable not to install this plugin and instead +to create this file, and its containing directory, using standard tools. :plugin_name: discovery-file include::install_remove.asciidoc[] - -[[discovery-file-usage]] -[float] -==== Using the file-based discovery plugin - -The file-based discovery plugin provides the ability to specify the -unicast hosts list through a simple `unicast_hosts.txt` file that can -be dynamically updated at any time. To enable, add the following in `elasticsearch.yml`: - -[source,yaml] ----- -discovery.zen.hosts_provider: file ----- - -This plugin simply provides a facility to supply the unicast hosts list for -zen discovery through an external file that can be updated at any time by a side process. - -For example, this gives a convenient mechanism for an Elasticsearch instance -that is run in docker containers to be dynamically supplied a list of IP -addresses to connect to for zen discovery when those IP addresses may not be -known at node startup. - -Note that the file-based discovery plugin is meant to augment the unicast -hosts list in `elasticsearch.yml` (if specified), not replace it. 
Therefore, -if there are valid unicast host entries in `discovery.zen.ping.unicast.hosts`, -they will be used in addition to those supplied in `unicast_hosts.txt`. - -Anytime a change is made to the `unicast_hosts.txt` file, even as Elasticsearch -continues to run, the new changes will be picked up by the plugin and the -new hosts list will be used for the next pinging round for master election. - -Upon installation of the plugin, a default `unicast_hosts.txt` file will -be found in the `$CONFIG_DIR/discovery-file` directory. This default file -will contain some comments about what the file should contain. All comments -for this file must appear on their lines starting with `#` (i.e. comments -cannot start in the middle of a line). - -[[discovery-file-format]] -[float] -==== unicast_hosts.txt file format - -The format of the file is to specify one unicast host entry per line. -Each unicast host entry consists of the host (host name or IP address) and -an optional transport port number. If the port number is specified, is must -come immediately after the host (on the same line) separated by a `:`. -If the port number is not specified, a default value of 9300 is used. - -For example, this is an example of `unicast_hosts.txt` for a cluster with -four nodes that participate in unicast discovery, some of which are not -running on the default port: - -[source,txt] ----------------------------------------------------------------- -10.10.10.5 -10.10.10.6:9305 -10.10.10.5:10005 -# an IPv6 address -[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:9301 ----------------------------------------------------------------- - -Host names are allowed instead of IP addresses (similar to -`discovery.zen.ping.unicast.hosts`), and IPv6 addresses must be -specified in brackets with the port coming after the brackets. diff --git a/docs/reference/modules/discovery/zen.asciidoc b/docs/reference/modules/discovery/zen.asciidoc index f0f26a4665966..d90be42d9178a 100644 --- a/docs/reference/modules/discovery/zen.asciidoc +++ b/docs/reference/modules/discovery/zen.asciidoc @@ -1,13 +1,12 @@ [[modules-discovery-zen]] === Zen Discovery -The zen discovery is the built in discovery module for Elasticsearch and -the default. It provides unicast discovery, but can be extended to -support cloud environments and other forms of discovery. +Zen discovery is the built-in, default, discovery module for Elasticsearch. It +provides unicast and file-based discovery, and can be extended to support cloud +environments and other forms of discovery via plugins. -The zen discovery is integrated with other modules, for example, all -communication between nodes is done using the -<> module. +Zen discovery is integrated with other modules, for example, all communication +between nodes is done using the <> module. It is separated into several sub modules, which are explained below: @@ -15,86 +14,159 @@ It is separated into several sub modules, which are explained below: [[ping]] ==== Ping -This is the process where a node uses the discovery mechanisms to find -other nodes. +This is the process where a node uses the discovery mechanisms to find other +nodes. + +[float] +[[discovery-seed-nodes]] +==== Seed nodes + +Zen discovery uses a list of _seed_ nodes in order to start off the discovery +process. At startup, or when electing a new master, Elasticsearch tries to +connect to each seed node in its list, and holds a gossip-like conversation with +them to find other nodes and to build a complete picture of the cluster. 
By +default there are two methods for configuring the list of seed nodes: _unicast_ +and _file-based_. It is recommended that the list of seed nodes comprises the +list of master-eligible nodes in the cluster. [float] [[unicast]] ===== Unicast -Unicast discovery requires a list of hosts to use that will act as gossip -routers. These hosts can be specified as hostnames or IP addresses; hosts -specified as hostnames are resolved to IP addresses during each round of -pinging. Note that if you are in an environment where DNS resolutions vary with -time, you might need to adjust your <>. +Unicast discovery configures a static list of hosts for use as seed nodes. +These hosts can be specified as hostnames or IP addresses; hosts specified as +hostnames are resolved to IP addresses during each round of pinging. Note that +if you are in an environment where DNS resolutions vary with time, you might +need to adjust your <>. -It is recommended that the unicast hosts list be maintained as the list of -master-eligible nodes in the cluster. +The list of hosts is set using the `discovery.zen.ping.unicast.hosts` static +setting. This is either an array of hosts or a comma-delimited string. Each +value should be in the form of `host:port` or `host` (where `port` defaults to +the setting `transport.profiles.default.port` falling back to +`transport.tcp.port` if not set). Note that IPv6 hosts must be bracketed. The +default for this setting is `127.0.0.1, [::1]` -Unicast discovery provides the following settings with the `discovery.zen.ping.unicast` prefix: +Additionally, the `discovery.zen.ping.unicast.resolve_timeout` configures the +amount of time to wait for DNS lookups on each round of pinging. This is +specified as a <> and defaults to 5s. -[cols="<,<",options="header",] -|======================================================================= -|Setting |Description -|`hosts` |Either an array setting or a comma delimited setting. Each - value should be in the form of `host:port` or `host` (where `port` defaults to the setting `transport.profiles.default.port` - falling back to `transport.tcp.port` if not set). Note that IPv6 hosts must be bracketed. Defaults to `127.0.0.1, [::1]` -|`hosts.resolve_timeout` |The amount of time to wait for DNS lookups on each round of pinging. Specified as -<>. Defaults to 5s. -|======================================================================= +Unicast discovery uses the <> module to perform the +discovery. -The unicast discovery uses the <> module to perform the discovery. +[float] +[[file-based-hosts-provider]] +===== File-based + +In addition to hosts provided by the static `discovery.zen.ping.unicast.hosts` +setting, it is possible to provide a list of hosts via an external file. +Elasticsearch reloads this file when it changes, so that the list of seed nodes +can change dynamically without needing to restart each node. For example, this +gives a convenient mechanism for an Elasticsearch instance that is run in a +Docker container to be dynamically supplied with a list of IP addresses to +connect to for Zen discovery when those IP addresses may not be known at node +startup. + +To enable file-based discovery, configure the `file` hosts provider as follows: + +``` +discovery.zen.hosts_provider: file +``` + +Then create a file at `$ES_PATH_CONF/unicast_hosts.txt` in +<>. Any time a change is made +to the `unicast_hosts.txt` file the new changes will be picked up by +Elasticsearch and the new hosts list will be used. 
+ +Note that the file-based discovery plugin augments the unicast hosts list in +`elasticsearch.yml`: if there are valid unicast host entries in +`discovery.zen.ping.unicast.hosts` then they will be used in addition to those +supplied in `unicast_hosts.txt`. + +The `discovery.zen.ping.unicast.resolve_timeout` setting also applies to DNS +lookups for nodes specified by address via file-based discovery. This is +specified as a <> and defaults to 5s. + +[[discovery-file-format]] +[float] +====== unicast_hosts.txt file format + +The format of the file is to specify one node entry per line. Each node entry +consists of the host (host name or IP address) and an optional transport port +number. If the port number is specified, is must come immediately after the +host (on the same line) separated by a `:`. If the port number is not +specified, a default value of 9300 is used. + +For example, this is an example of `unicast_hosts.txt` for a cluster with four +nodes that participate in unicast discovery, some of which are not running on +the default port: + +[source,txt] +---------------------------------------------------------------- +10.10.10.5 +10.10.10.6:9305 +10.10.10.5:10005 +# an IPv6 address +[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:9301 +---------------------------------------------------------------- + +Host names are allowed instead of IP addresses (similar to +`discovery.zen.ping.unicast.hosts`), and IPv6 addresses must be specified in +brackets with the port coming after the brackets. + +It is also possible to add comments to this file. All comments must appear on +their lines starting with `#` (i.e. comments cannot start in the middle of a +line). [float] [[master-election]] ==== Master Election -As part of the ping process a master of the cluster is either -elected or joined to. This is done automatically. The -`discovery.zen.ping_timeout` (which defaults to `3s`) determines how long the node -will wait before deciding on starting an election or joining an existing cluster. -Three pings will be sent over this timeout interval. In case where no decision can be -reached after the timeout, the pinging process restarts. -In slow or congested networks, three seconds might not be enough for a node to become -aware of the other nodes in its environment before making an election decision. -Increasing the timeout should be done with care in that case, as it will slow down the -election process. -Once a node decides to join an existing formed cluster, it -will send a join request to the master (`discovery.zen.join_timeout`) -with a timeout defaulting at 20 times the ping timeout. - -When the master node stops or has encountered a problem, the cluster nodes -start pinging again and will elect a new master. This pinging round also -serves as a protection against (partial) network failures where a node may unjustly -think that the master has failed. In this case the node will simply hear from -other nodes about the currently active master. - -If `discovery.zen.master_election.ignore_non_master_pings` is `true`, pings from nodes that are not master -eligible (nodes where `node.master` is `false`) are ignored during master election; the default value is +As part of the ping process a master of the cluster is either elected or joined +to. This is done automatically. The `discovery.zen.ping_timeout` (which defaults +to `3s`) determines how long the node will wait before deciding on starting an +election or joining an existing cluster. Three pings will be sent over this +timeout interval. 
In case where no decision can be reached after the timeout, +the pinging process restarts. In slow or congested networks, three seconds +might not be enough for a node to become aware of the other nodes in its +environment before making an election decision. Increasing the timeout should +be done with care in that case, as it will slow down the election process. Once +a node decides to join an existing formed cluster, it will send a join request +to the master (`discovery.zen.join_timeout`) with a timeout defaulting at 20 +times the ping timeout. + +When the master node stops or has encountered a problem, the cluster nodes start +pinging again and will elect a new master. This pinging round also serves as a +protection against (partial) network failures where a node may unjustly think +that the master has failed. In this case the node will simply hear from other +nodes about the currently active master. + +If `discovery.zen.master_election.ignore_non_master_pings` is `true`, pings from +nodes that are not master eligible (nodes where `node.master` is `false`) are +ignored during master election; the default value is `false`. + +Nodes can be excluded from becoming a master by setting `node.master` to `false`. -Nodes can be excluded from becoming a master by setting `node.master` to `false`. - -The `discovery.zen.minimum_master_nodes` sets the minimum -number of master eligible nodes that need to join a newly elected master in order for an election to -complete and for the elected node to accept its mastership. The same setting controls the minimum number of -active master eligible nodes that should be a part of any active cluster. If this requirement is not met the -active master node will step down and a new master election will begin. +The `discovery.zen.minimum_master_nodes` sets the minimum number of master +eligible nodes that need to join a newly elected master in order for an election +to complete and for the elected node to accept its mastership. The same setting +controls the minimum number of active master eligible nodes that should be a +part of any active cluster. If this requirement is not met the active master +node will step down and a new master election will begin. This setting must be set to a <> of your master eligible nodes. It is recommended to avoid having only two master eligible -nodes, since a quorum of two is two. Therefore, a loss of either master -eligible node will result in an inoperable cluster. +nodes, since a quorum of two is two. Therefore, a loss of either master eligible +node will result in an inoperable cluster. [float] [[fault-detection]] ==== Fault Detection -There are two fault detection processes running. The first is by the -master, to ping all the other nodes in the cluster and verify that they -are alive. And on the other end, each node pings to master to verify if -its still alive or an election process needs to be initiated. +There are two fault detection processes running. The first is by the master, to +ping all the other nodes in the cluster and verify that they are alive. And on +the other end, each node pings to master to verify if its still alive or an +election process needs to be initiated. The following settings control the fault detection process using the `discovery.zen.fd` prefix: @@ -116,19 +188,21 @@ considered failed. Defaults to `3`. The master node is the only node in a cluster that can make changes to the cluster state. 
The master node processes one cluster state update at a time, -applies the required changes and publishes the updated cluster state to all -the other nodes in the cluster. Each node receives the publish message, acknowledges -it, but does *not* yet apply it. If the master does not receive acknowledgement from -at least `discovery.zen.minimum_master_nodes` nodes within a certain time (controlled by -the `discovery.zen.commit_timeout` setting and defaults to 30 seconds) the cluster state -change is rejected. - -Once enough nodes have responded, the cluster state is committed and a message will -be sent to all the nodes. The nodes then proceed to apply the new cluster state to their -internal state. The master node waits for all nodes to respond, up to a timeout, before -going ahead processing the next updates in the queue. The `discovery.zen.publish_timeout` is -set by default to 30 seconds and is measured from the moment the publishing started. Both -timeout settings can be changed dynamically through the <> +applies the required changes and publishes the updated cluster state to all the +other nodes in the cluster. Each node receives the publish message, acknowledges +it, but does *not* yet apply it. If the master does not receive acknowledgement +from at least `discovery.zen.minimum_master_nodes` nodes within a certain time +(controlled by the `discovery.zen.commit_timeout` setting and defaults to 30 +seconds) the cluster state change is rejected. + +Once enough nodes have responded, the cluster state is committed and a message +will be sent to all the nodes. The nodes then proceed to apply the new cluster +state to their internal state. The master node waits for all nodes to respond, +up to a timeout, before going ahead processing the next updates in the queue. +The `discovery.zen.publish_timeout` is set by default to 30 seconds and is +measured from the moment the publishing started. Both timeout settings can be +changed dynamically through the <> [float] [[no-master-block]] @@ -143,10 +217,14 @@ rejected when there is no active master. The `discovery.zen.no_master_block` setting has two valid options: [horizontal] -`all`:: All operations on the node--i.e. both read & writes--will be rejected. This also applies for api cluster state -read or write operations, like the get index settings, put mapping and cluster state api. -`write`:: (default) Write operations will be rejected. Read operations will succeed, based on the last known cluster configuration. -This may result in partial reads of stale data as this node may be isolated from the rest of the cluster. - -The `discovery.zen.no_master_block` setting doesn't apply to nodes-based apis (for example cluster stats, node info and -node stats apis). Requests to these apis will not be blocked and can run on any available node. +`all`:: All operations on the node--i.e. both read & writes--will be rejected. +This also applies for api cluster state read or write operations, like the get +index settings, put mapping and cluster state api. +`write`:: (default) Write operations will be rejected. Read operations will +succeed, based on the last known cluster configuration. This may result in +partial reads of stale data as this node may be isolated from the rest of the +cluster. + +The `discovery.zen.no_master_block` setting doesn't apply to nodes-based apis +(for example cluster stats, node info and node stats apis). Requests to these +apis will not be blocked and can run on any available node. 
diff --git a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java index 4d26447078597..48fa49b9a8a35 100644 --- a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java +++ b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java @@ -19,39 +19,33 @@ package org.elasticsearch.discovery.file; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.zen.UnicastHostsProvider; -import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.DiscoveryPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.transport.TransportService; -import java.nio.file.Path; import java.util.Collections; import java.util.Map; import java.util.function.Supplier; -/** - * Plugin for providing file-based unicast hosts discovery. The list of unicast hosts - * is obtained by reading the {@link FileBasedUnicastHostsProvider#UNICAST_HOSTS_FILE} in - * the {@link Environment#configFile()}/discovery-file directory. - */ public class FileBasedDiscoveryPlugin extends Plugin implements DiscoveryPlugin { - private final Settings settings; - private final Path configPath; + private final DeprecationLogger deprecationLogger; + static final String DEPRECATION_MESSAGE + = "File-based discovery is now built into Elasticsearch and does not require the discovery-file plugin"; - public FileBasedDiscoveryPlugin(Settings settings, Path configPath) { - this.settings = settings; - this.configPath = configPath; + public FileBasedDiscoveryPlugin(Settings settings) { + deprecationLogger = new DeprecationLogger(Loggers.getLogger(this.getClass(), settings)); } @Override public Map> getZenHostsProviders(TransportService transportService, NetworkService networkService) { - return Collections.singletonMap( - "file", - () -> new FileBasedUnicastHostsProvider(new Environment(settings, configPath))); + deprecationLogger.deprecated(DEPRECATION_MESSAGE); + return Collections.emptyMap(); } } diff --git a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java deleted file mode 100644 index 584ae4de5a2b5..0000000000000 --- a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.discovery.file; - -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.discovery.zen.UnicastHostsProvider; -import org.elasticsearch.env.Environment; - -import java.io.FileNotFoundException; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.NoSuchFileException; -import java.nio.file.Path; -import java.util.Collections; -import java.util.List; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -/** - * An implementation of {@link UnicastHostsProvider} that reads hosts/ports - * from {@link #UNICAST_HOSTS_FILE}. - * - * Each unicast host/port that is part of the discovery process must be listed on - * a separate line. If the port is left off an entry, a default port of 9300 is - * assumed. An example unicast hosts file could read: - * - * 67.81.244.10 - * 67.81.244.11:9305 - * 67.81.244.15:9400 - */ -class FileBasedUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider { - - static final String UNICAST_HOSTS_FILE = "unicast_hosts.txt"; - - private final Path unicastHostsFilePath; - - FileBasedUnicastHostsProvider(Environment environment) { - super(environment.settings()); - this.unicastHostsFilePath = environment.configFile().resolve("discovery-file").resolve(UNICAST_HOSTS_FILE); - } - - @Override - public List buildDynamicHosts(HostsResolver hostsResolver) { - List hostsList; - try (Stream lines = Files.lines(unicastHostsFilePath)) { - hostsList = lines.filter(line -> line.startsWith("#") == false) // lines starting with `#` are comments - .collect(Collectors.toList()); - } catch (FileNotFoundException | NoSuchFileException e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[discovery-file] Failed to find unicast hosts file [{}]", - unicastHostsFilePath), e); - hostsList = Collections.emptyList(); - } catch (IOException e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[discovery-file] Error reading unicast hosts file [{}]", - unicastHostsFilePath), e); - hostsList = Collections.emptyList(); - } - - final List dynamicHosts = hostsResolver.resolveHosts(hostsList, 1); - logger.debug("[discovery-file] Using dynamic discovery nodes {}", dynamicHosts); - return dynamicHosts; - } - -} diff --git a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPluginDeprecationTests.java b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPluginDeprecationTests.java new file mode 100644 index 0000000000000..643c7b2c95c27 --- /dev/null +++ b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPluginDeprecationTests.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.discovery.file; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.discovery.file.FileBasedDiscoveryPlugin.DEPRECATION_MESSAGE; + +public class FileBasedDiscoveryPluginDeprecationTests extends ESTestCase { + public void testDeprecationWarning() { + new FileBasedDiscoveryPlugin(Settings.EMPTY).getZenHostsProviders(null, null); + assertWarnings(DEPRECATION_MESSAGE); + } +} diff --git a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index e47fe7a7a70ed..f34798605d784 100644 --- a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.discovery.single.SingleNodeDiscovery; +import org.elasticsearch.discovery.zen.FileBasedUnicastHostsProvider; import org.elasticsearch.discovery.zen.SettingsBasedHostsProvider; import org.elasticsearch.discovery.zen.UnicastHostsProvider; import org.elasticsearch.discovery.zen.ZenDiscovery; @@ -40,6 +41,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -69,10 +71,11 @@ public class DiscoveryModule { public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService, MasterService masterService, ClusterApplier clusterApplier, ClusterSettings clusterSettings, List plugins, - AllocationService allocationService) { + AllocationService allocationService, Path configFile) { final Collection> joinValidators = new ArrayList<>(); final Map> hostProviders = new HashMap<>(); hostProviders.put("settings", () -> new SettingsBasedHostsProvider(settings, transportService)); + hostProviders.put("file", () -> new FileBasedUnicastHostsProvider(settings, configFile)); for (DiscoveryPlugin plugin : plugins) { plugin.getZenHostsProviders(transportService, networkService).entrySet().forEach(entry -> { if (hostProviders.put(entry.getKey(), entry.getValue()) != null) { diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProvider.java b/server/src/main/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProvider.java new file mode 100644 index 0000000000000..f339ae43a703e --- /dev/null +++ b/server/src/main/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProvider.java @@ -0,0 +1,92 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.discovery.zen; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * An implementation of {@link UnicastHostsProvider} that reads hosts/ports + * from {@link #UNICAST_HOSTS_FILE}. + * + * Each unicast host/port that is part of the discovery process must be listed on + * a separate line. If the port is left off an entry, a default port of 9300 is + * assumed. An example unicast hosts file could read: + * + * 67.81.244.10 + * 67.81.244.11:9305 + * 67.81.244.15:9400 + */ +public class FileBasedUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider { + + public static final String UNICAST_HOSTS_FILE = "unicast_hosts.txt"; + + private final Path unicastHostsFilePath; + private final Path legacyUnicastHostsFilePath; + + public FileBasedUnicastHostsProvider(Settings settings, Path configFile) { + super(settings); + this.unicastHostsFilePath = configFile.resolve(UNICAST_HOSTS_FILE); + this.legacyUnicastHostsFilePath = configFile.resolve("discovery-file").resolve(UNICAST_HOSTS_FILE); + } + + private List getHostsList() { + if (Files.exists(unicastHostsFilePath)) { + return readFileContents(unicastHostsFilePath); + } + + if (Files.exists(legacyUnicastHostsFilePath)) { + deprecationLogger.deprecated("Found dynamic hosts list at [{}] but this path is deprecated. This list should be at [{}] " + + "instead. 
Support for the deprecated path will be removed in future.", legacyUnicastHostsFilePath, unicastHostsFilePath); + return readFileContents(legacyUnicastHostsFilePath); + } + + logger.warn("expected, but did not find, a dynamic hosts list at [{}]", unicastHostsFilePath); + + return Collections.emptyList(); + } + + private List readFileContents(Path path) { + try (Stream lines = Files.lines(path)) { + return lines.filter(line -> line.startsWith("#") == false) // lines starting with `#` are comments + .collect(Collectors.toList()); + } catch (IOException e) { + logger.warn(() -> new ParameterizedMessage("failed to read file [{}]", unicastHostsFilePath), e); + return Collections.emptyList(); + } + } + + @Override + public List buildDynamicHosts(HostsResolver hostsResolver) { + final List transportAddresses = hostsResolver.resolveHosts(getHostsList(), 1); + logger.debug("seed addresses: {}", transportAddresses); + return transportAddresses; + } +} diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 7908b931468fc..f8f95aed8c404 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -486,7 +486,7 @@ protected Node(final Environment environment, Collection final DiscoveryModule discoveryModule = new DiscoveryModule(this.settings, threadPool, transportService, namedWriteableRegistry, networkService, clusterService.getMasterService(), clusterService.getClusterApplierService(), clusterService.getClusterSettings(), pluginsService.filterPlugins(DiscoveryPlugin.class), - clusterModule.getAllocationService()); + clusterModule.getAllocationService(), environment.configFile()); this.nodeService = new NodeService(settings, threadPool, monitorService, discoveryModule.getDiscovery(), transportService, indicesService, pluginsService, circuitBreakerService, scriptModule.getScriptService(), httpServerTransport, ingestService, clusterService, settingsModule.getSettingsFilter(), responseCollectorService, diff --git a/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java b/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java index f2491b2db1f9a..82ec987420bb7 100644 --- a/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.discovery; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -29,6 +28,7 @@ import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.discovery.zen.UnicastHostsProvider; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.plugins.DiscoveryPlugin; @@ -99,7 +99,7 @@ public void clearDummyServices() throws IOException { private DiscoveryModule newModule(Settings settings, List plugins) { return new DiscoveryModule(settings, threadPool, transportService, namedWriteableRegistry, null, masterService, - clusterApplier, clusterSettings, plugins, null); + clusterApplier, clusterSettings, plugins, null, createTempDir().toAbsolutePath()); } public void testDefaults() { diff --git 
a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java similarity index 63% rename from plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java rename to server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java index 5837d3bcdfe3f..8922a38ea1e78 100644 --- a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.discovery.file; +package org.elasticsearch.discovery.zen; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; @@ -26,9 +26,7 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.discovery.zen.UnicastZenPing; import org.elasticsearch.env.Environment; -import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransportService; @@ -50,16 +48,15 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; -import static org.elasticsearch.discovery.file.FileBasedUnicastHostsProvider.UNICAST_HOSTS_FILE; +import static org.elasticsearch.discovery.zen.FileBasedUnicastHostsProvider.UNICAST_HOSTS_FILE; -/** - * Tests for {@link FileBasedUnicastHostsProvider}. 
- */ public class FileBasedUnicastHostsProviderTests extends ESTestCase { + private boolean legacyLocation; private ThreadPool threadPool; private ExecutorService executorService; private MockTransportService transportService; + private Path configPath; @Before public void setUp() throws Exception { @@ -83,23 +80,20 @@ public void tearDown() throws Exception { @Before public void createTransportSvc() { - MockTcpTransport transport = - new MockTcpTransport(Settings.EMPTY, - threadPool, - BigArrays.NON_RECYCLING_INSTANCE, - new NoneCircuitBreakerService(), - new NamedWriteableRegistry(Collections.emptyList()), - new NetworkService(Collections.emptyList())) { - @Override - public BoundTransportAddress boundAddress() { - return new BoundTransportAddress( - new TransportAddress[]{new TransportAddress(InetAddress.getLoopbackAddress(), 9300)}, - new TransportAddress(InetAddress.getLoopbackAddress(), 9300) - ); - } - }; + final MockTcpTransport transport = new MockTcpTransport(Settings.EMPTY, threadPool, BigArrays.NON_RECYCLING_INSTANCE, + new NoneCircuitBreakerService(), + new NamedWriteableRegistry(Collections.emptyList()), + new NetworkService(Collections.emptyList())) { + @Override + public BoundTransportAddress boundAddress() { + return new BoundTransportAddress( + new TransportAddress[]{new TransportAddress(InetAddress.getLoopbackAddress(), 9300)}, + new TransportAddress(InetAddress.getLoopbackAddress(), 9300) + ); + } + }; transportService = new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - null); + null); } public void testBuildDynamicNodes() throws Exception { @@ -114,18 +108,27 @@ public void testBuildDynamicNodes() throws Exception { assertEquals(9300, nodes.get(2).getPort()); } + public void testBuildDynamicNodesLegacyLocation() throws Exception { + legacyLocation = true; + testBuildDynamicNodes(); + assertDeprecatedLocationWarning(); + } + public void testEmptyUnicastHostsFile() throws Exception { final List hostEntries = Collections.emptyList(); final List addresses = setupAndRunHostProvider(hostEntries); assertEquals(0, addresses.size()); } - public void testUnicastHostsDoesNotExist() throws Exception { - final Settings settings = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .build(); - final Environment environment = TestEnvironment.newEnvironment(settings); - final FileBasedUnicastHostsProvider provider = new FileBasedUnicastHostsProvider(environment); + public void testEmptyUnicastHostsFileLegacyLocation() throws Exception { + legacyLocation = true; + testEmptyUnicastHostsFile(); + assertDeprecatedLocationWarning(); + } + + public void testUnicastHostsDoesNotExist() { + final Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build(); + final FileBasedUnicastHostsProvider provider = new FileBasedUnicastHostsProvider(settings, createTempDir().toAbsolutePath()); final List addresses = provider.buildDynamicHosts((hosts, limitPortCounts) -> UnicastZenPing.resolveHostsLists(executorService, logger, hosts, limitPortCounts, transportService, TimeValue.timeValueSeconds(10))); @@ -133,42 +136,60 @@ public void testUnicastHostsDoesNotExist() throws Exception { } public void testInvalidHostEntries() throws Exception { - List hostEntries = Arrays.asList("192.168.0.1:9300:9300"); - List addresses = setupAndRunHostProvider(hostEntries); + final List hostEntries = Arrays.asList("192.168.0.1:9300:9300"); + final List addresses = 
setupAndRunHostProvider(hostEntries); assertEquals(0, addresses.size()); } + public void testInvalidHostEntriesLegacyLocation() throws Exception { + legacyLocation = true; + testInvalidHostEntries(); + assertDeprecatedLocationWarning(); + } + public void testSomeInvalidHostEntries() throws Exception { - List hostEntries = Arrays.asList("192.168.0.1:9300:9300", "192.168.0.1:9301"); - List addresses = setupAndRunHostProvider(hostEntries); + final List hostEntries = Arrays.asList("192.168.0.1:9300:9300", "192.168.0.1:9301"); + final List addresses = setupAndRunHostProvider(hostEntries); assertEquals(1, addresses.size()); // only one of the two is valid and will be used assertEquals("192.168.0.1", addresses.get(0).getAddress()); assertEquals(9301, addresses.get(0).getPort()); } + public void testSomeInvalidHostEntriesLegacyLocation() throws Exception { + legacyLocation = true; + testSomeInvalidHostEntries(); + assertDeprecatedLocationWarning(); + } + // sets up the config dir, writes to the unicast hosts file in the config dir, // and then runs the file-based unicast host provider to get the list of discovery nodes private List setupAndRunHostProvider(final List hostEntries) throws IOException { final Path homeDir = createTempDir(); final Settings settings = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), homeDir) - .build(); - final Path configPath; + .put(Environment.PATH_HOME_SETTING.getKey(), homeDir) + .build(); if (randomBoolean()) { configPath = homeDir.resolve("config"); } else { configPath = createTempDir(); } - final Path discoveryFilePath = configPath.resolve("discovery-file"); + final Path discoveryFilePath = legacyLocation ? configPath.resolve("discovery-file") : configPath; Files.createDirectories(discoveryFilePath); final Path unicastHostsPath = discoveryFilePath.resolve(UNICAST_HOSTS_FILE); try (BufferedWriter writer = Files.newBufferedWriter(unicastHostsPath)) { writer.write(String.join("\n", hostEntries)); } - return new FileBasedUnicastHostsProvider( - new Environment(settings, configPath)).buildDynamicHosts((hosts, limitPortCounts) -> - UnicastZenPing.resolveHostsLists(executorService, logger, hosts, limitPortCounts, transportService, - TimeValue.timeValueSeconds(10))); + return new FileBasedUnicastHostsProvider(settings, configPath).buildDynamicHosts((hosts, limitPortCounts) -> + UnicastZenPing.resolveHostsLists(executorService, logger, hosts, limitPortCounts, transportService, + TimeValue.timeValueSeconds(10))); + } + + private void assertDeprecatedLocationWarning() { + assertWarnings("Found dynamic hosts list at [" + + configPath.resolve("discovery-file").resolve(UNICAST_HOSTS_FILE) + + "] but this path is deprecated. This list should be at [" + + configPath.resolve(UNICAST_HOSTS_FILE) + + "] instead. 
Support for the deprecated path will be removed in future."); } } From a3a168b5c0c64a949ca0716b141cd5ccb921bba5 Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Thu, 30 Aug 2018 10:17:42 +0300 Subject: [PATCH 13/52] Fix/30904 cluster formation part2 (#32877) Gradle integration for the Cluster formation plugin with ref counting --- buildSrc/build.gradle | 9 ++ .../elasticsearch/GradleServicesAdapter.java | 68 +++++++++ .../elasticsearch/gradle/Distribution.java | 36 +++++ .../ClusterformationPlugin.java | 110 +++++++++++++ .../ElasticsearchConfiguration.java | 46 ++++++ .../clusterformation/ElasticsearchNode.java | 130 ++++++++++++++++ .../ClusterformationPluginIT.java | 144 ++++++++++++++++++ .../src/testKit/clusterformation/build.gradle | 41 +++++ 8 files changed, 584 insertions(+) create mode 100644 buildSrc/src/main/java/org/elasticsearch/GradleServicesAdapter.java create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ClusterformationPlugin.java create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ElasticsearchConfiguration.java create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ElasticsearchNode.java create mode 100644 buildSrc/src/test/java/org/elasticsearch/gradle/clusterformation/ClusterformationPluginIT.java create mode 100644 buildSrc/src/testKit/clusterformation/build.gradle diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 9918d54d70737..759edc3d36e8f 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -23,6 +23,15 @@ plugins { id 'groovy' } +gradlePlugin { + plugins { + simplePlugin { + id = 'elasticsearch.clusterformation' + implementationClass = 'org.elasticsearch.gradle.clusterformation.ClusterformationPlugin' + } + } +} + group = 'org.elasticsearch.gradle' String minimumGradleVersion = file('src/main/resources/minimumGradleVersion').text.trim() diff --git a/buildSrc/src/main/java/org/elasticsearch/GradleServicesAdapter.java b/buildSrc/src/main/java/org/elasticsearch/GradleServicesAdapter.java new file mode 100644 index 0000000000000..6d256ba044971 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/GradleServicesAdapter.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch; + +import org.gradle.api.Action; +import org.gradle.api.Project; +import org.gradle.api.file.CopySpec; +import org.gradle.api.file.FileTree; +import org.gradle.api.tasks.WorkResult; +import org.gradle.process.ExecResult; +import org.gradle.process.JavaExecSpec; + +import java.io.File; + +/** + * Facilitate access to Gradle services without a direct dependency on Project. 
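+ *
+ * A minimal usage sketch (the directory names are illustrative placeholders, not taken from this change):
+ * <pre>
+ *     GradleServicesAdapter services = GradleServicesAdapter.getInstance(project);
+ *     services.copy(spec -> {
+ *         spec.from("some/source/dir");   // placeholder source path
+ *         spec.into("some/target/dir");   // placeholder destination path
+ *     });
+ * </pre>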
+ * + * In a future release Gradle will offer service injection, this adapter plays that role until that time. + * It exposes the service methods that are part of the public API as the classes implementing them are not. + * Today service injection is not available for + * extensions. + * + * Everything exposed here must be thread safe. That is the very reason why project is not passed in directly. + */ +public class GradleServicesAdapter { + + public final Project project; + + public GradleServicesAdapter(Project project) { + this.project = project; + } + + public static GradleServicesAdapter getInstance(Project project) { + return new GradleServicesAdapter(project); + } + + public WorkResult copy(Action action) { + return project.copy(action); + } + + public WorkResult sync(Action action) { + return project.sync(action); + } + + public ExecResult javaexec(Action action) { + return project.javaexec(action); + } + + public FileTree zipTree(File zipPath) { + return project.zipTree(zipPath); + } +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java b/buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java new file mode 100644 index 0000000000000..c926e70b3f765 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle; + +public enum Distribution { + + INTEG_TEST("integ-test-zip"), + ZIP("zip"), + ZIP_OSS("zip-oss"); + + private final String name; + + Distribution(String name) { + this.name = name; + } + + public String getName() { + return name; + } +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ClusterformationPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ClusterformationPlugin.java new file mode 100644 index 0000000000000..779e7b61ed9ce --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ClusterformationPlugin.java @@ -0,0 +1,110 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle.clusterformation; + +import groovy.lang.Closure; +import org.elasticsearch.GradleServicesAdapter; +import org.gradle.api.NamedDomainObjectContainer; +import org.gradle.api.Plugin; +import org.gradle.api.Project; +import org.gradle.api.Task; +import org.gradle.api.execution.TaskActionListener; +import org.gradle.api.execution.TaskExecutionListener; +import org.gradle.api.logging.Logger; +import org.gradle.api.logging.Logging; +import org.gradle.api.plugins.ExtraPropertiesExtension; +import org.gradle.api.tasks.TaskState; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class ClusterformationPlugin implements Plugin { + + public static final String LIST_TASK_NAME = "listElasticSearchClusters"; + public static final String EXTENSION_NAME = "elasticSearchClusters"; + + private final Logger logger = Logging.getLogger(ClusterformationPlugin.class); + + @Override + public void apply(Project project) { + NamedDomainObjectContainer container = project.container( + ElasticsearchNode.class, + (name) -> new ElasticsearchNode(name, GradleServicesAdapter.getInstance(project)) + ); + project.getExtensions().add(EXTENSION_NAME, container); + + Task listTask = project.getTasks().create(LIST_TASK_NAME); + listTask.setGroup("ES cluster formation"); + listTask.setDescription("Lists all ES clusters configured for this project"); + listTask.doLast((Task task) -> + container.forEach((ElasticsearchConfiguration cluster) -> + logger.lifecycle(" * {}: {}", cluster.getName(), cluster.getDistribution()) + ) + ); + + Map> taskToCluster = new HashMap<>(); + + // register an extension for all current and future tasks, so that any task can declare that it wants to use a + // specific cluster. + project.getTasks().all((Task task) -> + task.getExtensions().findByType(ExtraPropertiesExtension.class) + .set( + "useCluster", + new Closure(this, this) { + public void doCall(ElasticsearchConfiguration conf) { + taskToCluster.computeIfAbsent(task, k -> new ArrayList<>()).add(conf); + } + }) + ); + + project.getGradle().getTaskGraph().whenReady(taskExecutionGraph -> + taskExecutionGraph.getAllTasks() + .forEach(task -> + taskToCluster.getOrDefault(task, Collections.emptyList()).forEach(ElasticsearchConfiguration::claim) + ) + ); + project.getGradle().addListener( + new TaskActionListener() { + @Override + public void beforeActions(Task task) { + // we only start the cluster before the actions, so we'll not start it if the task is up-to-date + taskToCluster.getOrDefault(task, new ArrayList<>()).forEach(ElasticsearchConfiguration::start); + } + @Override + public void afterActions(Task task) {} + } + ); + project.getGradle().addListener( + new TaskExecutionListener() { + @Override + public void afterExecute(Task task, TaskState state) { + // always un-claim the cluster, even if _this_ task is up-to-date, as others might not have been and caused the + // cluster to start. 
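+                        // unClaimAndStop decrements the claim count and only stops the cluster once no claims remain
+                        // (see ElasticsearchNode#unClaimAndStop)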
+ taskToCluster.getOrDefault(task, new ArrayList<>()).forEach(ElasticsearchConfiguration::unClaimAndStop); + } + @Override + public void beforeExecute(Task task) {} + } + ); + } + +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ElasticsearchConfiguration.java b/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ElasticsearchConfiguration.java new file mode 100644 index 0000000000000..913d88e9fa11b --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ElasticsearchConfiguration.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle.clusterformation; + +import org.elasticsearch.gradle.Distribution; +import org.elasticsearch.gradle.Version; + +import java.util.concurrent.Future; + +public interface ElasticsearchConfiguration { + String getName(); + + Version getVersion(); + + void setVersion(Version version); + + default void setVersion(String version) { + setVersion(Version.fromString(version)); + } + + Distribution getDistribution(); + + void setDistribution(Distribution distribution); + + void claim(); + + Future start(); + + void unClaimAndStop(); +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ElasticsearchNode.java new file mode 100644 index 0000000000000..8b78fc2b627cb --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ElasticsearchNode.java @@ -0,0 +1,130 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.gradle.clusterformation; + +import org.elasticsearch.GradleServicesAdapter; +import org.elasticsearch.gradle.Distribution; +import org.elasticsearch.gradle.Version; +import org.gradle.api.logging.Logger; +import org.gradle.api.logging.Logging; + +import java.util.Objects; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +public class ElasticsearchNode implements ElasticsearchConfiguration { + + private final String name; + private final GradleServicesAdapter services; + private final AtomicInteger noOfClaims = new AtomicInteger(); + private final AtomicBoolean started = new AtomicBoolean(false); + private final Logger logger = Logging.getLogger(ElasticsearchNode.class); + + private Distribution distribution; + private Version version; + + public ElasticsearchNode(String name, GradleServicesAdapter services) { + this.name = name; + this.services = services; + } + + @Override + public String getName() { + return name; + } + + @Override + public Version getVersion() { + return version; + } + + @Override + public void setVersion(Version version) { + checkNotRunning(); + this.version = version; + } + + @Override + public Distribution getDistribution() { + return distribution; + } + + @Override + public void setDistribution(Distribution distribution) { + checkNotRunning(); + this.distribution = distribution; + } + + @Override + public void claim() { + noOfClaims.incrementAndGet(); + } + + /** + * Start the cluster if not running. Does nothing if the cluster is already running. + * + * @return future of thread running in the background + */ + @Override + public Future start() { + if (started.getAndSet(true)) { + logger.lifecycle("Already started cluster: {}", name); + } else { + logger.lifecycle("Starting cluster: {}", name); + } + return null; + } + + /** + * Stops a running cluster if it's not claimed. Does nothing otherwise. + */ + @Override + public void unClaimAndStop() { + int decrementedClaims = noOfClaims.decrementAndGet(); + if (decrementedClaims > 0) { + logger.lifecycle("Not stopping {}, since cluster still has {} claim(s)", name, decrementedClaims); + return; + } + if (started.get() == false) { + logger.lifecycle("Asked to unClaimAndStop, but cluster was not running: {}", name); + return; + } + logger.lifecycle("Stopping {}, number of claims is {}", name, decrementedClaims); + } + + private void checkNotRunning() { + if (started.get()) { + throw new IllegalStateException("Configuration can not be altered while running "); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ElasticsearchNode that = (ElasticsearchNode) o; + return Objects.equals(name, that.name); + } + + @Override + public int hashCode() { + return Objects.hash(name); + } +} diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/clusterformation/ClusterformationPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/clusterformation/ClusterformationPluginIT.java new file mode 100644 index 0000000000000..c690557537dfb --- /dev/null +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/clusterformation/ClusterformationPluginIT.java @@ -0,0 +1,144 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle.clusterformation; + +import org.elasticsearch.gradle.test.GradleIntegrationTestCase; +import org.gradle.testkit.runner.BuildResult; +import org.gradle.testkit.runner.GradleRunner; +import org.gradle.testkit.runner.TaskOutcome; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +public class ClusterformationPluginIT extends GradleIntegrationTestCase { + + public void testListClusters() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("clusterformation")) + .withArguments("listElasticSearchClusters", "-s") + .withPluginClasspath() + .build(); + + assertEquals(TaskOutcome.SUCCESS, result.task(":listElasticSearchClusters").getOutcome()); + assertOutputContains( + result.getOutput(), + " * myTestCluster:" + ); + + } + + public void testUseClusterByOne() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("clusterformation")) + .withArguments("user1", "-s") + .withPluginClasspath() + .build(); + + assertEquals(TaskOutcome.SUCCESS, result.task(":user1").getOutcome()); + assertOutputContains( + result.getOutput(), + "Starting cluster: myTestCluster", + "Stopping myTestCluster, number of claims is 0" + ); + } + + public void testUseClusterByOneWithDryRun() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("clusterformation")) + .withArguments("user1", "-s", "--dry-run") + .withPluginClasspath() + .build(); + + assertNull(result.task(":user1")); + assertOutputDoesNotContain( + result.getOutput(), + "Starting cluster: myTestCluster", + "Stopping myTestCluster, number of claims is 0" + ); + } + + public void testUseClusterByTwo() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("clusterformation")) + .withArguments("user1", "user2", "-s") + .withPluginClasspath() + .build(); + + assertEquals(TaskOutcome.SUCCESS, result.task(":user1").getOutcome()); + assertEquals(TaskOutcome.SUCCESS, result.task(":user2").getOutcome()); + assertOutputContains( + result.getOutput(), + "Starting cluster: myTestCluster", + "Not stopping myTestCluster, since cluster still has 1 claim(s)", + "Stopping myTestCluster, number of claims is 0" + ); + } + + public void testUseClusterByUpToDateTask() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("clusterformation")) + .withArguments("upToDate1", "upToDate2", "-s") + .withPluginClasspath() + .build(); + + assertEquals(TaskOutcome.UP_TO_DATE, result.task(":upToDate1").getOutcome()); + assertEquals(TaskOutcome.UP_TO_DATE, result.task(":upToDate2").getOutcome()); + assertOutputContains( + result.getOutput(), + "Not stopping myTestCluster, since cluster still has 1 claim(s)", + "cluster was not running: myTestCluster" + ); + assertOutputDoesNotContain(result.getOutput(), "Starting cluster: myTestCluster"); + } + + public void testUseClusterBySkippedTask() { + BuildResult 
result = GradleRunner.create() + .withProjectDir(getProjectDir("clusterformation")) + .withArguments("skipped1", "skipped2", "-s") + .withPluginClasspath() + .build(); + + assertEquals(TaskOutcome.SKIPPED, result.task(":skipped1").getOutcome()); + assertEquals(TaskOutcome.SKIPPED, result.task(":skipped2").getOutcome()); + assertOutputContains( + result.getOutput(), + "Not stopping myTestCluster, since cluster still has 1 claim(s)", + "cluster was not running: myTestCluster" + ); + assertOutputDoesNotContain(result.getOutput(), "Starting cluster: myTestCluster"); + } + + public void tetUseClusterBySkippedAndWorkingTask() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("clusterformation")) + .withArguments("skipped1", "user1", "-s") + .withPluginClasspath() + .build(); + + assertEquals(TaskOutcome.SKIPPED, result.task(":skipped1").getOutcome()); + assertEquals(TaskOutcome.SUCCESS, result.task(":user1").getOutcome()); + assertOutputContains( + result.getOutput(), + "> Task :user1", + "Starting cluster: myTestCluster", + "Stopping myTestCluster, number of claims is 0" + ); + } + +} diff --git a/buildSrc/src/testKit/clusterformation/build.gradle b/buildSrc/src/testKit/clusterformation/build.gradle new file mode 100644 index 0000000000000..ae9dd8a2c335c --- /dev/null +++ b/buildSrc/src/testKit/clusterformation/build.gradle @@ -0,0 +1,41 @@ +plugins { + id 'elasticsearch.clusterformation' +} + +elasticSearchClusters { + myTestCluster { + distribution = 'ZIP' + } +} + +task user1 { + useCluster elasticSearchClusters.myTestCluster + doLast { + println "user1 executing" + } +} + +task user2 { + useCluster elasticSearchClusters.myTestCluster + doLast { + println "user2 executing" + } +} + +task upToDate1 { + useCluster elasticSearchClusters.myTestCluster +} + +task upToDate2 { + useCluster elasticSearchClusters.myTestCluster +} + +task skipped1 { + enabled = false + useCluster elasticSearchClusters.myTestCluster +} + +task skipped2 { + enabled = false + useCluster elasticSearchClusters.myTestCluster +} From ec83a30345a4e48f3453e032f63513aef947be9c Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Thu, 30 Aug 2018 10:20:38 +0300 Subject: [PATCH 14/52] Upgrade to latest Gradle 4.10 (#32801) Upgrade to Gradle 4.10 --- buildSrc/build.gradle | 1 + .../src/main/resources/minimumGradleVersion | 2 +- gradle/wrapper/gradle-wrapper.jar | Bin 54413 -> 56172 bytes gradle/wrapper/gradle-wrapper.properties | 4 ++-- 4 files changed, 4 insertions(+), 3 deletions(-) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 759edc3d36e8f..25d2a97302e91 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -17,6 +17,7 @@ * under the License. 
*/ import java.nio.file.Files +import org.gradle.util.GradleVersion plugins { id 'java-gradle-plugin' diff --git a/buildSrc/src/main/resources/minimumGradleVersion b/buildSrc/src/main/resources/minimumGradleVersion index 899dd4f5927a1..9add3349f9ea7 100644 --- a/buildSrc/src/main/resources/minimumGradleVersion +++ b/buildSrc/src/main/resources/minimumGradleVersion @@ -1 +1 @@ -4.9 \ No newline at end of file +4.10 \ No newline at end of file diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index 0d4a9516871afd710a9d84d89e31ba77745607bd..28861d273a5d270fd8f65dd74570c17c9c507736 100644 GIT binary patch delta 49416 zcmY&;Q*hty^L83GwrwYkZQHip*!VQIZQD*7+qUhb$v0M;|1c>21nfT@+Hj_{9>$d45!cN1Z+|hhm6CW2B$%*5##x71DzUtpf+V16 zElzocPFpPR-eheu_0Co-aw!U1GS!|WRu+eS6=*i-bu&rxIBQLttSh~9sU3AMFX@2x zq%nmhrmaWTa$Ct!O~!1!WQ2cB>#PVPX^U&i__nHAv8JqMdRZztZj@P>Ije4h7stiA z5>XAUu`FWqh&*O#H0gDtLkPKb&n>np&pb;FZ;X7U)h8~S>JK>~s4pP{H^-r&Zn?4q zOe(8!5hij5_EHq0@P7%~wWj&SjBqI%W+?e`JBVb zG!Aj3G~GvDF6jYFTuy5-Rz>CI7@bBJzfB$-8?DU{ry^ryXiS0;mM{k;l5{|%a*d`z zSlS*6b34IVZGdrG^Da#Ld0zq7k=6_Umc|SF7M0VsB}8v6#1JBv`=mQsAEP@S`TH*# zBvQLWt%2~4q4~esejaalD^Xq=cc_A8vcB>qZq3tBUvPp01?euaWWVWuw=pb!s1I1Z z>JMPQCV_Q%$NGB5QcE(S)29J3^%#D!julEoQic^cZJLKyBj((htn@yDwizDk<#CPP z%Z3Lk*m}zkxB~w6q*b_i-1REBEhkZTD1^IuM2a-8SbLGny;eB?^4pV4K9b+lf9{%T z7&HZJbvFqAT_B8JGb-Ac7`>cbWmsZNvuV1vsfu4CJ|JsnYoFp|<}d`b=ENRg&+1%1 za%`Q0spPh^84OFPbI^`T>#%3*we>0cvt2OdxbJs|5KRm!rF*Au)Uyj0v3+Gb6my?j zE1S4hc^7rTRtvmGxHnIyRi`pvmqt~D*?wlMRfZ$4eYh}-2G?iN?9vq#oeuvs#<9oa za6sXys8k3woLd~_SIXc;h z$0DlfmT^PMmuViF*vMg@(T8lL`A(?1%4HbZ7bSd3bWF#)I?n@;?uh*$(`aNN3r(Tp zRCp!-r;M%RIQ!mK#HTir7AZ{8FO4?&E_va9H4^biJ?J^oX|ymz(v+NY{al#5Dj3xp z{Ofv{<}W9AL_p3U*x$Q|L%6_`64Z^k4&2oy>@JlUo(MP`sh|W%r&Mcc6qaCV6QR69 z8kh^Nxo`Y(6`^xLuvc6S(n^Pu`&{xLsHDeWfgpxM)BcZ^Qes=H4Lwog+v~x!d!&_8n@{A>qdp}NL8Wx>qI^~B-q?W= zUuS%19~TQkFOBs0moFW^!vi_QznpC8&o4@sA!Ss+1r!@pUzYx%4D6njpp$rW#i1Ju z{g%!igq3xG&*hJ_xkoAc!!5xlQ_`-ug&WRrD*PwtmUNKS5@!p<>}`_aj(7G5D9b@W zVt|2t|Nh^`6#>&EDj!L?2w6B^Ue3cisG7jPAeQ#%T?OCoblG< zSac0a!mq!Knvf_-!7PgoEq8`KqMM5ySdyTn(i9zqjDLY855La$`F1EsLa++4Sw)3j zf7lcJtZ=y4hxlKhYcA%!$?`DjfI(H$a=rBrDo8U^QTb$B^rdoIdE*(6aD?C#BlE-L z-gQZI;Fr*(7fc13tnN;Dr{^D@m)TBNE8ySzGeo~Xe_NvDKv-~4Xe!3fP3;j~xGP!) 
zyqC0C#O*fJa4VCgLBg=(W)iqZ*RT$)Z42pTbG07~%fZs!e+aMPpTcg#0SxD>rhy@Y zI`C#<81&zSZm?UL_fE<;7i|%4^ZUAV&}SMLup02*?er09b#g2BdQn_r2QQenZXx$Z z;{h15yZS-~1%nOiXg=ACAF3SG4Z@NLc7~1r@__vSiWZ@4^+u=kZwS@9nO2S(@<)wB ztYQZQWWhb;zEz}#&}Dz80a$fbRRZ%LN*uF^BR}?5f2&<)r0scxHaX3SzzXgh8SPW7 zkyya%At}5B@*EU~N+Fbfb;*Zq6KkmT-)9oCnTJP`#_Rr@EvO#(Cbl^(A!B;^Z757D zzrY$(yjB+E7I83va41w}S#CxlI;ERkU8R~MPSJg67)vU26w#A~3*0W^I+%w3!-F}E zCbuk;j<*c-5Y+Jz$=Ko-y@uyuF}G1b3MO_=)U*=+6-{rBK29p6Sf{#d{B|4B=ZZd- z;hJ3M9PbyPQ|J>+Vr<%IMLnkm_R zLasPMuE9B#p{a6D8Sj6__X_sZz7+`!ES?Yyj5OI9nh0Prvo~>d&DDW5##rY|d z&FheZQEn2d;74KOoYnScr$xc1#KGtH#ZF5s;fb#!@{KJ13Ty$_?d#sh?d@MBzmqQJ zZb;)~eFVVxdp-1gEVEYA%T?7|lx=_*M_;8TFg0xV?UOTLtp64orT-S{?&CEW>>mnH zl2uQaC8Rl%59tK+Z@GUB!B5~9@23u!$fq|L5ZI?awWmB|l=*rE_78ebeP*xf^cMWp z_frh)D*{lRxk@#EnxpE)?C;w*Lj25*1dxpx{{h9|mr9Jrhe8mq%%7f^y|G`g`v3@_ zl3bqlT$XXH$mBxkt_*prvn|{Bu1;P>tESLt>xUstCl7*g5=T&zSEECOg=~N@;w+6a zYQ&IfyY$Evup=vid_zSXhHbo+gK~qgmUdMKl72=kQ0wdw1X)r}^xso1H$70-8 z4Hzt|VL8@RTZNuyzLM8*oOHO{-b$r-DzpDlI*=l%mL+e)Svr!U>E2HmX3uV(OzH_o z(!hJ$n{0yCAY9*AbMp+dK5}@hnM(>_%{Y%R`Z%DB>x2`=J+wypg_roJ%|yKbAaqhc?|bw#4C$xw9_ z$;?Go4`s^{_ENK;##3Gw?&=)#&-Dr5#ryC<-8?-+aA+lUvf;voC3E<(i_7$9=$T|xr>WO4DlfHF&Qr!@f&_#zowL_8 z^Onr@TH-6Hb42`G4)Ip4Ea6S|P}Ns#XQPDmt$p=uQ9PF$A7WS*D;^TO5Aqho5!AjLxm zRgX$?8&|zZ_1C@lBs#-HwJY~YY&k#PPUBdIKGw7>vYkiTDQ$OU@7*8)jXP1 z53y6v>PrBF-v*iQ?@#px+q4C)X-)=4bdHi((fQXK4_038twwy=cZWL8$>6|&&x~HJ z)teY(>Im}ko04XWhuzl##n7!wxe)Ej;W|R+?rEy_Es!KS`{g}2?;37gqMn^)>|_FPs`*L2$4NF52h8!Y2psVV!Zk2=9jc zRg1-79)w(G+IIGM7VB(hlaFCn7mK}?U={bU9T_4Q8YD4xF^fiS2^l@yxXwIc%UN0KDn85}3eHR{bYuD8qsU{T#w*b+7Whzn=v zkk-9ZPafofza2Jf*{?W@ZI7YfS5Crj+DRhx(wt{SRxVRs$f-< z?G+iOe8^=t#dgC0TqOOJ^Eqyy>~RUvv6jgfN8quQ=_6=9?m)YC_Qn@L(GWf;8x^n` zPxvOMI4&b%AlQz42BC;DsYAkVLOle9ua}~~O~n|tZ8bsLxt9RL;H_*w_=~dKVm^tK z$0o8k<|k%YCgqUVL=x&s%p|H@QI)48ht4smpAPTXj??(=%yjY1qB zq$#hkkp%ph_pfx++FZ{vKAGhlo|Lw-x%g4+opYIfej`3{v<)-q}m{441sJVj0 zx{a*{ik9+P*Eu`}jYQtc%E zPIz$CFv3OJt7N;Ra=0EH@{+O1A+0Q!pG_mGYsWv+<>uO!<;-c%B9EfUXoxIOCl07c6($V)3lv|Rdh~&f-^i=gw`lUD zIH3vAlxdx7l^S(Z;QE(lYVk+@47pw50@yECxY2e=t8N-;nk+kQS8uhtHPIOKd9zc# zs8o^2bQ`+P^46m|j5elbPiN56UFw}w+c;YN5)wVZp~*kagX)sK=HanYI-qqW!kg;d z!@I$W&6mq5y}G3}8B3;=*SoYSLRb~Ns5JV`KwDbqlgBwtOzwR=*S&kiclwYT1@vZO zM*-XiaT~h~n6lbcA#gf$H3KNWNuBn`Tyh3bYKU>bq1q4{G!zR%aUA1&tbd^6AYTv| zsgi%eXm*)7TwsXybd~rE?xZn<0ZiiI)faXbzTx>HlJ0S+v$Gon=fjU8>}mX1ucK^t z=XVsab;8Fw@CZ?kPhBst*vR2wVVLShS$><$!RS~sLSC<-^@Jq5mc7StJn*VJwax+0=V=zi>TqP z*enw`TH9w02H_I2o&7xQ^la+W$;Y3ypSWrP6AF^cS_A*235em_o9T^b^S|z+9;@-A z3>N(>+mNMH-WD}_RiR*JlKll81i_`uB|I8k*3dzHtYjvWU0=v@oIR+T<)@R4dJiQ= zN*;ScYJ^9>Nr$z1^kA7?2}riv!>a$Rt=iTltooj5Q$w(>s`E@ST4$z#S1lAYk}tvC zK^3d#XnZ5=w1ctQ*InY{d_w7K_mo8Uw^moR^?Q4)M!-HSJEtGg$-p~1C&60s>?+;t zt0P&3U~Du*dYZaWJhV`jnC08yBs*SyWFuxYYq( zwjSSBpk@E$%x%6WY|W-=4aE3&rsJ_3SF?Xd-;sR8BMX3mFsFvNG352Z&TYcqnxE|> zJRdfEG$kfmk2mA)XSg;WB{Yv9G^KH0q7~?vR{kwoRqhNqx5&{addZyh&VjIB%^g^O zM9~|l_Dc1q90TH&6FA%LmpM=qp>yifZfO13QZ+kW9Nj+R`~2p7Rr8e#Rl5sO|Dk_X5XK}*r0G4poNr@XB%FjR0V^TEOq>N z;cNNu@yeY%0i}oPI|1zO^2F{%>n?CPm^)5rCpD)J#A}?4!P)&Dn`>JVgxgcDSP#on z1S|_D7DXO3_7!f#3sYJ#ifN~9k?HJ#^IUI)jm)ux+*b+S3`x`TucXltc)a?lU13cB z7Ml8DT{KW(exOWba%Db!x=mah`?DJt)Js_-b4TANAVDBq++0rUr5ua=#!W<1FQygJ z?9gjTD&mPJ;DAYLR1s9Y191{NFl6(XRB(n#&^cL)+WK@}Q%jnkLgx6eBm5~Q{^CcB<@OOTF2U}XgD)AjpnZe1SBx|V#c`Hc z3@NW+y>pDB6K4;Ns!-nO3c9=H9j|1Dlap^8tSW`xbY8@i*Km1@i@WtDh=ToXGuVF0 z{&YcY@2|n-b_zGfmcN);_m9fSSo^sn5njcvaY+I5e)|}N<@fA$oSDQcoScavc_My5 zgeSiwgxJjF0mCjho()-WfbPJib}KMOSZC*xJ1)J|ieYq`UbmfubRS-~9pxG2T-_hV zlE^=@2QIG}s&6tcKc%mXXK%QIsc7=Yv>jM0)iMw%iTX@<>{lGg&HvXYI?7MYr 
z!;HXhFP*xmPC#eLEon}J~N>NIA+nmb(>^!pY=q%i| zSZeM}2^sk;596Gm$J!8JWox^bHoxz+;$iy>?jTmD22vQyXTPb!um;s8xmEwumJmLM zQ>*3C-W4dHQ+vzTMdcrLln zxzwkY{HqhK%UWE=m7EEhE@EVR$kSnxV|IOX8K~2&k#kvZIxz+cN$68qmRPJxq@<24 zg;8+iL!GsMN`juk%c~qWGJ~!#s`97_h1kqD+nUz%4N;#WFQS|F=`HSEHtw--4!;{T z?F{DhKPM~{?he*ma4*(DLgS3&$foFD5TZA4!eeIY6lNKGjS&Z0S}?0ObvF8M-=Akz z>tEiowh0Hhr&#QfHAIwma~x#^tQ03U21Z`L+Y-EMKOqRn zxR!FUE#{Nxk1=d%7TC&`>dekix|--BZTyrFRB0`7WGe>@+8rRVNm;+|rimWWIL$%x zU(-ORcvc%08on5KO4{Tntu2IC^FEvO#Ti~>&udARAMw4`_4nBEfuvWOq8FDy0)9qg z$anmpWh?Acw@+o%_d9Yzp;36=fTA>P5V(l5-dJT-@ummQEjUcrc9j7;!5SA_9=pFy zibASX@j3vOx!VBFS(edJyqpnH2z1RrV<8~~I?z*+r8o*e0)_UdmbGvL7+horzq3JEaa@F}z zD0}F^D*iB0AZ?Pyr+U#!E=fMsp>ueWrtVX0L!?E@CqYHl`p>6FoY38t>g}gA+Iq8z zP@Sr{<54M(D6ReP@}NLl4L>5i+Q}%LRmE_aa{4-hrxTf_e9ZOViDt{`Bxf#2UHA(- z7C?$kZ#xQ=`x#;9RdJV@#%k%r=P+GciUp^Rt+>u!-sBD=p9+$Kuft-x5mgqogN^1f zo!DDw6tkRsjTvv9?RTUy_oL7N$Oo7mzZIcjY;W`=X;Q>MqC7I`0yPD+KYS}c4%jBU z1kF2$_HCm06)qtP&UnS=+K^E$HvWCf+69n9h>?bi^t(F$NWwoGqd(uvLH`hC;g3=Z zF+vH%MZHcb{QifnmVmg5zQHZFvG|}TUY7yGkq9`* zw0+_RN3E9jywV{tnO>iU>)3m8Ia4F?Jly^q2kg;tJ?{-O1=^Fz1fkNDXay?rdTj}_f(+Z-&GJ_Dbo`-; z1ESx)U<5I4rcJumYw~EKBBWu?{DDguB#rtrZF&^AE?5YtkfE9>9?fC%h3I3NW*^Me zYaPuq@6|I(g5&wW>{jMjj>W`%17Nl(y2B)lL?hwcVE|b;U;_Cy>dauGF7701%`L+m z7s+%y%o!7r%gywB$28Lg!HsKt%z&yks-WwIfLFU}O)ZD#Jl~J#`L~582-vZ5is;x- z$jzs$g{55(QG#C9V)>^C#Gyz?d}4}u%L*h$D`1#7J_)eES+m6qFl!MeCB>NY`y~S0(%##lg&ig^^ z=rwdb{&9I2hIqdq!SxweGeqVu-1TyQbM=!9BKkz%^!aI8-_I8j*Q?}`663(Wg;8-O z%3!@gGn)_)$7Qtt#43cLLL~5n`oA|W_jv-+2OJD6EIFT; z6WCJR5=Ie#C-sQwTxJ)B!;saJUPQyTmSe_<4Eh%7qynCJkCADPi7*;R5FQnv!HiZw z_SA^{x2)>22Aw95-&l^Vx%kL+w#F0q`U^tuN5H~(JdBJfhokyIOIk+~5)xCQEfie< zGe;>;8}VIP*4$teKCzr~S~|l5N1sy@3-Dax3tgE$blIf2wv3fGaj+XQo?nDHZLHiX zQ(JBR@A90yAdqu8hu%!Dwk2*%V6VpVi>)PHsrxU1T+0lOh5>j-`_s$$yY3V1f8b)i z{E24Y89OQ83#c7<{F*&l#(D@{=S~O1_1JH6E_6(HtiwM2t0xnry9{-heJv+Wfp_Y; z8ugCydg!Sgx|h{$@;r~7A>Gg#&-CnfgdUK%zxdRO7DG0A8U0oE$Pi*P+PJjW1j!us zO_&Ph)oM2xwsx2{eP+tO+Cu!dqWY^++Ysp+bn!uY%zBow0>gC7J1)q+PBpq+Q^gSh zYXk@`U}P-IBE4e_rL#y2gv1^M)RbQi(w%|=P446P#F73*+uso0740ooU2(Q6RtXB3 zo-?@q_YH~M5HiV#*c0TA)U=}Agp;Nwm3Q0bDHH8*mqk$XEV93jT!+}}Y4H`>6~&J~ z|M|E`Xe%Xe|3ic2QH6?s#o=)jrKb%EJt=Ze);WQf*Fv(B+#C9tE-uhRdG?)RMDcMJ0nx-FZbq=2RiHlL2e4Qh@@|z~7=!4Ik z1+R-!S7-!eygl{>_TOCM{C|EQ)xj9n8ypx|Hx3vW-G7D&RQ-eBcV>|kVrb9_079yZN%E2rD z_${E*Pnd~V-C1as&P#Y8*w9ZpNU+8fRjD85RYwsSi^97En4)DXqX zUv#LFlk6Bjabo83)`6XWcbH7q{hG23=nfpw2?ozqd4cDv0U&Qbkr8YVC6tlo9VTY> zod%}S&|1%HSjk%%_MYmU!`nNd_iG9NXPxRy8AG2@dR{fI*P`;DGgm_IBzIpDs$t|oC`*`nqCfZzB zTXL^(d=d5pjOu?eyQvEm!W8@1Q>;6VkT)iB22LWer8PWea8yVohY1%%bT``-^zbdO zWS*$yN^UgQciEA^8|1(+lF@CUMM~7vf>S;r27~7zxn?X{hLf=rbMbhrMRi&^?naHFk-7farm>k|_LT!#ZU&mMivR}17Ucqh0=YQ_P7 z2TYS4E`0n#KC2AkpqAOsa((m*CI=!i1{@MA29~No9TQ^orW6$7!jai!E4ZXTH#&%v zaQt)I2z8G{Jq*!j(hJLS zQiJjsr$yGtzC}a;5KMFPU8`(JWGUArCSrOF>~DJiv_8HyH`b7fDF!WqKZXrz&nnm@ z)0-*!qh@hUrldq!ERkAj5BU3bc~j9?$q)ezUvATjN*1w+l&g*x2WA5A9l0|e(XVVe zrJ=}pppcm^D)P<(VI}VGimA#29f?1ZK@AIeByolDu+vkv+l2T^x=0x%n+N6;yxnWx z@e`ftqb-~BSCs~g*1gsPF+WokB#j;Zj<{BiOKHtUoQp;lKW}OBNO1I3ROyunBd!7a zx`^Z*IztK_DnlyU<7w`Vwo7+hpSgMTk4!^jdj#PvJTlX#Dwc6!zkiy+4*CwF!-wz^Nmjxw04jLSSGjUhZ$6gVB=|468dow%R%~x zSbx;TGpR5RFtyJQdw*_p7prKNPMHAZ-mF+0PbQpjXU8RB*hic`Dk*1OjAD&??UGyQ zux2rlcM{?TRml@sO2P9xuv>e2)(sIUXcu&f7}vFJ<_sj#9cqqKplNDH!p zh?6XBQl`QsCgqOD7q{MPh0&Vvj6{JH+ZuCRJJZzVHP;YJThrDG^!3fcG1D?k9M1DN z7AjE^f)(^gXCF?d;TV|3OK7hi$hhH|M&S&u0}}xg$V*MiOWpg$mfJfOa(kO1Lw4!N z*evU$!%vZ}hU_Cni?cWDSZl!2+IQwxEShvNB&$L&9fjFYit5qo3&#RkP*#V!K2<{^ z|6(mQO0%yL3CqtAaw)P=ClWM1+Vt7 z^^u6CoHpx3v^dUmb?euGvcc47A`*sw_|wU1Xz_O0vCV|@5M~6Djn6+ 
z3I9ej7ENWNQ&nA^y&uWZJ=5~pKAl&u&fl>7Gx3kW32T{Wx=q!UTE?W3%p|9OXV~iW zx}0*O;mVBK990#?8Q8~ktIFL}T9&PL#1ZnCd-NTba++#)w*Lv5-qGTZpt<0Y*{{Og zX5rCaSKq>edv=kQ)4c(jOx`cyoo%k8qT}h*(=59CoLrpncHWB;%_N(ZdM%)|%o^8r z!Z$lZn_#>C1jX$9NuJI<@X7KNSO>=#Y6~hOcAG-hLY+-L@j3MPS zbQiTdpdL9XQkBKy1;TCS4!+)AIjZ5KK0Vp!>9#_w)=D2L$KnD;i+|@>9l{dXSLLhV z>_GSb&P-hV+*q1N@;>9{T;z>z|IQ~|e&L5*JFc!5)EF&Qvos(|5M|(Ff-r`BPd}Tv z_4-YF*!?}_6D#L7rY9yxbOV=Sbn}|?|tb(1?@+`QRou$-dHHMmP)WCj+e6q6N?ti z!iS!IYB$d9HAlOTWyWVyvEAa=?E&Zh&$$msq|DRCV{QS!5GXJ~nQFw+`{uPjM)EAS zZ~MoxFz2H%r)TnSV2(fK3ykZtMhVJ&bJ&%82;>`vUmB;PFu!pz!J)1tMEapmT~;s{ zVfY(^6wc5vStGZYGiHbYXQWPHnM}%~6X;Q~(9Ig$qL!$S8p@(?PwoACw-{yb{Xf!p zr5mL}GAIWCPoH)`1Fai?YnI_DZ(TN`vpIVgh^2kiouX3Nsd~i}ohnlnOYx?iRw`Vb zONOd7>}&T%3+U8Y*X^}Li`LcX+vE-%5&L&f%W!b1#~W#Z{$1YWEn$x@5EH$_9FBg% zMgy+8{AL%Hcum9Y!B$sS%9Eo2D^@#*s%8xDS{NR9p109DVtuzHL=aUnA^$l*wH>9o z9mvQXOz(tr({?*b_=R#CLD(Js#=;{Dq?v)E_D=lbAD^34Nl~8l=MJN3eC{8{F@1b{ z`m{=t!{s3ub{;NN*5s2~RPmy95D?l(I`#H`yy_Vc2gH=;)x`itr5FCOEe$-jL|Gg(;2u2wP)SPhOmL_ru%_ zH4+SxA3WPgx_-MhRS!PHj~|vXL_f0|*48?~*}d95x4s6^>hlIp!EHPpH&o9w9ku&r z?O+&CKke$ow$j$fnfSxutFCuk8LEesHP`?ZHsINFDw5UpjG>G##(|LUb$9$hwPKX9 zf0H|0z~^|;9FKR=|VkOA4gYTxi`Dl`GfnuY#G)Alr*5y+NZxB|CrbL_@LRr{ag>SLWI_Qjcna=BNNxNaO7kP~@m4i12r_yg@-@NWdyH&xJ%8wqXYr_v|Ig==1}#Y^w7;zJvMw$= zMY1vs<}3 z=nb12Mt9$rEs0?&O6HEI*KybPW#_5uML^)=mO2lz0TnP1b&*e!kztT2qiCdbHU-v0 zf}*&_P(rJVaKsRxctFQmv{6Sc{G?invM#bozW;gO2;SWsVeJPuD%Aq(gh-3b63rd1D$WgCsDG(5!{(^bFFH7ixzr4>*@tA?sv` zm*THzmt-d1JOVW*k~yMxe_g=o8{~GLichc+`cAjnhT9Lx#l|As&$CXXIbRoB)*W9a zDHZVDbablEt~945@*Z4<2K^VL2yX1lOFcV^wUvB$ADz*Ylv4vI3}lsS2V5&vSl4-o zBD}nt*INyZHP{$~JrT`+T#6kHtyO0rc;dj{7+1Ji#>H+D&EVw6~j#TdX{xYcK0bjmU+v zVu>%=#=W1@z$i6$zi(1hUO~(Ojki$jtw`!Xz{Nlo)7!E}tC*~%d&?U~-C!CZuZUUY zd|}VsWY(M9MN{Okyu_9)<})BP_^tu6z%)_;X`~@2)W4~ygE)MQ0K}C127mOim>72R ziHgi-_xfYii@Z<-cT1Gd%%m&4w)9J@^U_5s%@N7mq->U;2%FT9Cs|HY)B4=T9I`}K zb1!e|>cg0@`XhWij{^=oPP6E++N5s0E%rx~t}w#&N)xVL8J(vpc0c%f^o};v!1gqG zdbwk&&)uXLDNgTM3u=Cv@4_`n8JX!Q=Azxd!kMi-WUq+=;O#C%7vRB({4I$Qrk zBnJT}?LdZfm>lA&qnzf?nwQ~!_FO>l9;-K=;qWc{e|(Pw0MPl-r2i^TOnPj_t?4w~ zvcTs;s+ONQq4a<+po%f`dJsc>sr-JX{?r*VDuE4V)vm^(rA@K0D?Cr5r8cSZrT36^BkO^yVw|SbySpk(xnvy_R#c1XA;eH|aOEvYu z#DCJdCb3f!V6fP#dCe0~&TBWW1Q#WW@lkA`a55{>nRBqutF?44h(g}eZ_9|#LAchJ z<$ICGAgn&d{cvZV>p#zoQ8{L8%zF`kkNN=6xlT=6n<5)JcgrVxxlXg}mm}ZSq&$yi za3^HX@m*JcCaiKQnXPP=aoKH7oP0LS%88Y#Fs6k8uau!IqiI9e4325*!LC$}DO9%| zF3B*}s9OP%@7dAT?0O&Fg`_19YUDRjXg%WB-@=iTp^at6)n|4e`(}P}ci)voWwkCE zB%c2a2(9w&;kM5)cc*I;Qb+8Nz?*lagr0wIP1o5IkytkBb)O65`P6XOU~uSsV70G= z892cJ95e>7DvdwB{=z*-EEJMXF1oruH9bWI(`4(Sl@!(Th=Lrd-Vc0#%ZF8ay(To8%f-q4H_dAK1-&KA(J ztVDSt+_>@6BuV_pPb@BN1^MiQ5B)v}3AX=$7rg6{n*>LpU8UoqM`aunT7zyc)CI8# zS8H0c35j8(Tq(OBOOw`D9Z@AQrDtzVhqe_l7E9Vabun=^HTyvc(F3C^%^j!qPEAK9 z6_dC$g}FKO&Dw(9ws^_WmpUp-OXb6nV&(y3mGCMN zOU1-7mXxS&6ZP;EPf}nW6M2qIqVWnCJ344aDKd(QVEGrK<|n_deeor%fReoxF~g4P z9b!F=7U^vYVo!WUY4nPd_6yVY%forYSLzM<)`;gjqPtC%)M4;=gv1i`Hzfz_2nP+> zRGUA_|Ij%mqMU^;Z|EBc9O-?XZ<4sIoc0RI)Tpq;eff9j`6r)e_>Y($l-r?z|FF0A zVMEYDh!>=1lG1(zcO+^ITD^`+|F=!TB89JhoD=%xHiV;N_@gJx92np5imM-5KSduY zOEk7u+Vr{+lq$IIS<|Y$f}^JR9tA;LH$Di<=72xT zq^ffewZg$tf}1Y{KSUf)c6NJ0ZF1Q98I@>9{@M2h{=bL%!Q#oD>AwKJIXxH{$^TQ} z{){04xjHb$>L-L>U#>?6aWJAJ{VYo6=4hN~7>K0O?2r@)5E_0mhDpKrpg)8Zw$-iL zH3q913%r+uA{y#wKfqT5@waT#TXn5!yISj_%KE-u{xGE(4oI23y@P5LQY@3wAGnRO zz?rdK64UFBO>L;`^bbo(`6Zr}R=l#p%YD6}=a#+F(k1WgnTe-d&CArYI`N3lN*n!R z(&Nv(4e5^1j~~QsCQCB+;-|a=M=^;ED?l<$W{K%k&ZX1&*9U4P^=K~GQ@X9`VIVXB znK|{}vc2IbhoRvteK$_+vR7)lB?R`WVse>g)(W^utnh1GxKes18@pjOD3gejU!NNN z$&fb@zl5b{dt3pJ%Z?!7fR|b0-iMj-uwaywnGx$en)~`7!mmY|&FHAPTYX3+*33|s 
zwCAK$!!IWMXwfmjvNxT}r8<0z->(fo@m1u^pW4Gr>peSoC=oKEdFvg;_L>O6S@eLXoT5)xuQ$+b^FvirT;T=Gf>qGi z2NfteG}%#pW=$>k@uS7*NUrPJKpvHaGn=`MY<0xnrhEjkg%v@V~iiM5^87l!h}m^)IJS(F*FS5 z<9yGuczUvAXj}h|AJvv5k3t6^`GD$Xv}8fdZ&$=*NN^31RJ^Q8DGaeG;VFmaKlsXUj@s5>7Y-;gD+sH?C@W~ zyT2&?;crm>Sc2O+LN_zQVE=c8=#cj z+A_2B=f`W#CY|KA!S$EEo1~ILdm3~%s>lo2&G>og z8Q`}#YD24WCTwE-K)e-O@pG5OFfhY?y28e1oSa2rM%m)%RXsAYb%QD52B)g2n`oIF zM5LdwE@)&lD3~Hc#o0_T6g{U87JTMl*a5@JX#DT_BdgWTq$mK?9K;Q}HgcLeRLYOA z=f~)_v=YZx-y4BC>Nf9jNv(rd=*vD`A3P`t)vZ2_|pZn16)}m9_tk&hjbgiApE>9vj=RK zLne?7lUZ#0QQ3XsCoqxo34gJVTDmp6-E>ShU^>XrO2ZOM$)POKnM(u_v?NE79;(TZ z$Ss~yD#8I3zcAp37Jq4k9FekNo)bnfSOwE=Nb>RFa)&>S)X^-z+NSW(*s%9X9vKa^ zU6C_7*pSBBxEPKqr)NrU5s?aoH*rfTn_qCG#EdCrvW8aDnL*2!O_3w%>eQ;0WYW#F z{8U$i(l|BG7EfjIFS9M7D6j!P?;wXsF%+XTQ4j*yYsA6|C~9!Qt6!y!YySxHt0Cq4 z;DpDse_FOG-j1@$HB1Ybx|j1Qp)FsNpK2nlRa7|g{mNVC(rLSRWVl(o5PO#{7iiHc z2Nt(@1!Utd9{qm{mp`2QH2@Jy&?6YGnBY~KdaMG*Nn-6Ne!IcdgXGue?+wD zg(GPpUJ0J{1nuzd4Q;virr(N{Oy~z$`}NK8br_r>?N&Z81qf#;<`QEd)h&8+^+d`q z_M>0P`VW`sCe>m69D5`TWmU9Otwo(XhoK|UwU_MPO)mlLN%K>oo| z7XK`ClnR_hejw*Hkea_O{>+&DAi?~p38hnDIAR8rEaXRqNkYKca{GvSiFROdW;p{& zcsR@$U6z!a_$Ml_s@&lH1Ngy}L%fAScK_dzr2gs~CdS^KQ?9e_!=M|bz zviWS8or^fkKlv-YyJOPKfXA^(;c@v%&{O$v`})HBB_cT|X7w1R-PF#6qUcqSF$XWb z3PT^{lVQ-5;<>^VPDZ0n&}=ok{1)$*Ec}){3!L^9XfC|fo5qN-tyy)vS^?vA&snp~ z_j{15a*LlmFh-T{3TNqWROWt(MPB{%6Al7bKXSiFw?_S3Y13jz1w=)IKtcD?{Obk! z*iW?jWI6@!dcWz_0W%8S%XBAD)oA_2C$CTle>b}|9t#Uu-*(2xL4it#T7e4gx+J-M zWc5g+R1EzA5- zzO2a$jILdN@;FNE^LzD6fwFT@0m)LXn{mfw83<)rdA0w#j~&Rbcfki0pT~ z)nkIUF3Ti4g<9l#@b8nEg>lJ`!H7YFs*~7JkHc#Ik?n?c$Fa&-4UhjvvYN49rxo-LVG@ujP6Y?cY~1wUjCeIbAd5MA{QO zv{Nm*$!P6F@WNPt!(NNjlwfX_5Tbex4?~#Xzx@S=B>Kom<(6Iq<3hvLvFL4UB+spu z0Rfa1(e3OwY3eE&4GOb633rfIhP~Ze2$#5YD_a&q^i35Xu+&`jti?AS&Q~@~S*Z%} zOo^HL=2EArFFW}@Pqe(;TA6LWHPT5)j40LLTeUmPf8%Qay2@Jlg8i$HsrHihu574e z@x_h(8;2G162!Y|SuWXDq-|q9A~HqE)t&z2?S$<$Ojxj^jB5IYR<;ge0^e@kNF+-8 z7#N^9cCZfzaLY*J{xg{;yHYpsl;K)2W0*13noky_>{SFRj$gcIF0dmQrPI7g2CRSm ziQ{?n#ZyKAAeBxexzQxQ;k0DpWu+fbiiGlJxT?#r8YJu}(QQhS8mM>5yB-OJHlcQ3ovV?IO(D-BUU#9dm%(fI?>OC)pn>~f} z|G0Xm;LN%&+B@vHW81cE+qTUoPM+9)qE5%|*d2Ck+qR7kI$yq1=iZR&nTprtQ8YZVGD}H59W}yl;ixf&O)1kX=)s9lg z@3=OV*bXJ*1M28|_3u3AfX#0+$CITLIm<-T>M)HQ+5s$NYHp4aot&pOb|wB~Zpn#L z1@6(O9WP^r*QyCXgEH2s*Y>X?C>UYRB?1y<2zHc0HWvm?FMbQuDRTwKQ zr_>MXWSnSOI`Lr@44i1~1eqVW9|Y`TlS+3f5rIag{DjRwoXMWEkT+YfZ=rHILe~6; zsqMx124_S5z`PDn!~)jKhLEdG^pseXP4@NlDS;(;mX?gy9~Po-CTpj>8@j`EVih2|0{#(hwIDYBKgLE z)k}^kAIQghTgbAedDcq*EQNUE=Fxurj;FUK&nyz4nJY~_L$RohGR&&=jA|2BsVkL+ z&5TY(k*o%?DCbQ50+cJ%35A~YxQ(DE)GcOVBaN72CVyg~Tv)t`iqbY1spb4A2PjaH zJ4HHEDf&^Y9ZoF7erI9Q10l>E!!cjqmi}b40B17-U8x6l?$QHhfDc*cdSE0zkjSxU zTSO7)1+K)})?PPRJ-dmwSv2bze`OheMUHpRiPR@j^!67_alvL7d!aj0Y3&!eskt&_ zrj9iLTw}L_X-g6v_l#gm7G&psm)$|zy`-wq2--mj_p8?nb8!nWYioUOFvnJ$Dh!+C zop*2|U(>2v{l+g0gXP7P*3aD$P{c=>ZD$FTuae@gXGWmLo@Md{>pLB&s)3?rM>dlh zNK({=vFViR7=AEuMZQ$5dx5f`k3{gXv-??FD|Va`%y}(1aEJb zlbLCxeoj~O3-FfRr~B*ND2o(}cfJi|<*58)lBZ+|A;<;&F;p#*B=-eHJ|Vm|8`c@9 zE22&J{TTdjef}G%%YNS!?}I*{wyRhzM5%^ef%`@a4g(@Q|8~E!E%$@@99k?_uolI2kzH} z`wcdWRM;DPX@ySJ@v30IgZe8An(b+T$mL~VmjrRjL9*0El0*i_REM|XX4QV&X^VVrN8*z}-a{2O zkFNaEP`V47c9hvK!NnGV@uFC=IY&d6ew*5BhL%u+($$)EK;N(;`aOLi-S=shB`>{k z;RzzItTac-Fr0>R*HZUU3(qdpFu_#VueAY>Dl<{18ZbxK!^c?GlA{$nSYThqdHF!7 zJHbmBdbz`hZ3RGGEdR%*m&^j}T;#yGWJLL_Mp<=~G5u{moWns_Uw|=dDm3sr=1PiS zEkOprTsZN;^9e@MyzdVa@(LN$g1+mAy|5a6^G*`z*`NDFH`vl6qunc;SF|l!Xib*< zk^2wKVUi;&H&WI&$QHLt1Q-@0e~R&n{(+0vhU51s1{~bLw0kIIyA1%+HeVNY(>5bZ 
z1^LSU@Mb!B^BWtWAK5>JFj7|Ae(4r3%HZ^sECSj*(9MG5g|;z`&Yu2^I9(DW~b~Y@e;0c@eV-N1MIkEJ%fDUQ-F9;dBL%Q2!M_cG+Qc-b!>hHx-}h&bO7Lo{Q_X`2f{gDq2W3)n=)u6dCdwC|P&`i_C zab@jM2+@ta544!`c;Ww#bxoOcb6Vy016lJrBWW@?l@-E94h@|LAsneP84?ZTFQkRZ z^*!^9#j^sa;F@vAc;G7W_V!(c$qQX@rk}}^I;y6JHn~YlB}(Axl*w@ueo#g-WP1xD zdWFK~LY78+-$7HY_>LyXug$p6ReoliDPv$~A|K!vii8&lxXRnDd%O9Lyiz*H!}jlpUUt&h)lG~@U&QmS!6inVo)HH ztV+hgxV6OMAS0`y3K{OT32+qgHL?}`wE`IBlq%JynF~5QKw>$GmgZ za1%J?=jV^zrarxxPOpW9f#5dzxYCHf&00g&Sv6GRiz3=Nyo%fCCtDyidlNzoBThzF zP?#Y3O?14ibRyoeN!5V7wFpl7uUzKaQ_{|k3onVb0vzpyA2R~QEy>nS!bpiRhG$?G z9DYM;7LjN0H~e%Y^ZtcNS1saPI=gNb<#kzK6fG!@m6rKnr$0oIEJhv;l>&c7=}L@@ zA)v5_>L!bEqmxBFCqSuWvW%NP^-_zR7%l}@V~~n=Xpn54d3zm5&}Jrr{gheg29iV6 z4-hAJwPguGE>8kpKo4_F7Hs^$3iVy}Kf>^0zW!&R5V&^cu1zz)jRMJ;wPm@g^r~Li zq-2`xkmczII}15rFWh4{cnup+U+~ZO$5*9dn_Q}Xf*B=l}5WJVhi>`UnoFT^~St9D%OW*Z8&KQo2E z&2+whRCn{6O47NDXuoie9V;~s`NKBHZf-?qdmSD#cdjgvWHYNOG>8Dxc50s}h!kOq zRZJ=71!<#ilFK-h3aeW}>&q2OlbY#hA^jX_zRai$ljOz`disA8!?v-wEdM!=)|6@J z2DEpOP2XxM4vjunRi59m!of&zAu@>oQTM?i7@%?bZNP|&gwo-DGCz+vRi#N3Prle! zQ=an{sIXQtAe;B2ec}>*vO|XrK}r{}LZ!+L6<*#@-NsDAO$GJ^k3V}n{DO&b+Lf>u z#3+0u+OV(T&uk*Fl44blRqejPgOf9$Wyd#X9VB+JXd=MJ1r%|HuKv-Q;Jy`0gbtQY~ zs097gTukJeGM2O8M~%J$^x{tygllSS=Tn))lI zb~E*J56o~&zG~rYr~m!S@&|p$i~NP?5AzOl*lj3_Q7VYJ{G}j&t;J|f^(9VuQeZex<~ z$^J_JJEx%MTzv%X3Cd8A%n&)l59?jX}oYlGX_wt0n>XM|F z$Bm;Z%odtU^y-=sJv?5PEo?bSXGKOFt|0{O)La#H%UU?ZW?fer9dqkl3B8qSufsdA zdesj*kZYtpnzrI4V!BNUZ*iYA% zjI86-gYiv9mOUwkyeRTzPKjh{xQ3rqNkUBG$1#@$)C> zU%$CVH#)d1JMt9v*i#ni&#KRWG)^*Vj<`KhB;qe8f$$PbaGs4fgtDE^7Y*kq_33)n zTe?y2tTIK?HF^kT7#g<{vR}aqq~a4EX2wZPjW8aw!|0y7u}=;R&A`kKcm@EOxr|hA z^^w>!4-Kakg!1>i@B$NsS)}m(MG4!!u(x1(v=4CEFmXB^&83?kge_JUvcz=57Io9{ zERlS|`<`{UTaL4G#|bj~QNthlb6H_#ukej?TImfopx?t=NjP*%CDL{##6-|t*}VMp zHo$C62z6BL$6qo1_YpXJu44s^#UIUTiRrd~iKH;WaBd)@ygxRts>#%iNvp=!+Qk|j z*kTGBsN!eu@ZHpLe{!6B3lIF{?;I2$AQsRm8vfqY?fUzIZy(A2Bfnv$1PEbSpC7S3 zd-zS=EO*2CQ3^gR$0M>*rs|1?Oj)p?Id|0obVha!Fw9W%YE zXLR$As(g0W;( zzbuCSX96wL2>nr0vY*@HEK5;9NkPSOy8h0btTFW)eH1>c64nBUCawu(jn+R(Cw2_U zuaa@7*&35hD3M-ib0}At&$u>Kz~564LaynNjeK=Rv{{C~S#~Zb{wQ8RNT9Fe0n;I_ z>EX1zaVi>Ro?U~u2TSe?A)7+fM-^eX%;ZySU$Z%c(ciDP+kGN=R2W_6q;^iw?*@S- zEOLyK1s8FL8bJb-;dk^-Rg21c=lR=9+KQ zo1x#=sJhR!bEjfpJEm9|;g{sv#yQG8A`J{bJ~)th7$DpLk(xYDpv?Muy14O*NOfsG z01!1?u05?_>JLw7ttvii?Ehk@|07zf9CnJ^=g~WLhXDaMl@I0zumXwmKS>|<*o36s zwT7&}-wXI<-9ca(I!oDINY@=S0OQ<6Uip;eP61R!W%M}S<dD%Pp*341l0ts_suJxro_3K-)lPBWIJ6f$OY>ox$L~#GUJBIM+Y-QReW(H$>%_ zNur_CKNG;DVyhi7;@-%X$X5tuO&1{H2an*l0K_@rcp}f4GMbNa>&mjBd>DG~7#vX# zQ7?Mg{X;aSNu~@enolW>!HSvP>saMupRD8_MXI7(_;1D%JhymREuh)3jVa)$s@%ANA!WjvpsB;}fxi^H@qt0JyWPfQ zPXg&=XfoAr?7$c^Z_JGn5Y)0kW~^YC$CU5QR-FmlS+`V7GPmgh7PH8VL6j6FI_Y8= zGqIC0q}4qtF0%Q8^_iLYKigT@`31#Uxrq3s4-Y6WW3WbTtmsl)9QqX`q4kvXt)^xb ztc}JG0Kr43{T$h<;L7FBMm>IOdZjIZIUviUcc7*0@8hPDIHnw3Sw{BS)d+)(R3mD~ z1E_D*jQp6gt7~pjiB)TQT$Ss|1HMzU+Exy+2*We()|?=#pQIke9%V~(E-V^|%XMV? zQc{Uem>E99BNC-e49|a&K+7}Dn87BPTvk^1O0yD~CMG$Seh$74jK8c2``K*o4S-9_ zr!tM|e=#>bDtNPu+KZ_P3-RLmAv)-$5_#LJdD9UCrCSIZ1+rq!>Y%gaqX1HjUyCe7 zqVIaXiFw4xRXC?I`0V>6bUa)^nBbs%=Q&xf;NLf!lF`=xShUj+f+(N9SYa|)t)iY! 
Date: Thu, 30 Aug 2018 10:32:08 +0300
Subject: [PATCH 15/52] [DOC] Repository GCS ADC not supported (#33238)

Make it clear that automatic default credentials (ADC) is not supported
for the repository-gcs plugin. "Service Account" method is the only
alternative to authn requests to Google Cloud Storage.
---
 docs/plugins/repository-gcs.asciidoc | 139 ++++++++++++++-------------
 1 file changed, 70 insertions(+), 69 deletions(-)

diff --git a/docs/plugins/repository-gcs.asciidoc b/docs/plugins/repository-gcs.asciidoc
index 918b499a29603..c8b760d3a9c8f 100644
--- a/docs/plugins/repository-gcs.asciidoc
+++ b/docs/plugins/repository-gcs.asciidoc
@@ -10,71 +10,66 @@ include::install_remove.asciidoc[]
 [[repository-gcs-usage]]
 ==== Getting started
-The plugin uses the https://cloud.google.com/storage/docs/json_api/[Google Cloud Storage JSON API] (v1)
-to connect to the Storage service. If this is the first time you use Google Cloud Storage, you first
-need to connect to the https://console.cloud.google.com/[Google Cloud Platform Console] and create a new
-project. Once your project is created, you must enable the Cloud Storage Service for your project.
+The plugin uses the https://github.com/GoogleCloudPlatform/google-cloud-java/tree/master/google-cloud-clients/google-cloud-storage[Google Cloud Java Client for Storage]
+to connect to the Storage service. If you are using
+https://cloud.google.com/storage/[Google Cloud Storage] for the first time, you
+must connect to the https://console.cloud.google.com/[Google Cloud Platform Console]
+and create a new project. After your project is created, you must enable the
+Cloud Storage Service for your project.
 [[repository-gcs-creating-bucket]]
 ===== Creating a Bucket
-Google Cloud Storage service uses the concept of https://cloud.google.com/storage/docs/key-terms[Bucket]
-as a container for all the data. Buckets are usually created using the
-https://console.cloud.google.com/[Google Cloud Platform Console]. The plugin will not automatically
-create buckets.
+The Google Cloud Storage service uses the concept of a
+https://cloud.google.com/storage/docs/key-terms[bucket] as a container for all
+the data. Buckets are usually created using the
+https://console.cloud.google.com/[Google Cloud Platform Console]. The plugin
+does not automatically create buckets.
 To create a new bucket:
-1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console]
-2. Select your project
-3.
Go to the https://console.cloud.google.com/storage/browser[Storage Browser] -4. Click the "Create Bucket" button -5. Enter the name of the new bucket -6. Select a storage class -7. Select a location -8. Click the "Create" button +1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console]. +2. Select your project. +3. Go to the https://console.cloud.google.com/storage/browser[Storage Browser]. +4. Click the *Create Bucket* button. +5. Enter the name of the new bucket. +6. Select a storage class. +7. Select a location. +8. Click the *Create* button. -The bucket should now be created. +For more detailed instructions, see the +https://cloud.google.com/storage/docs/quickstart-console#create_a_bucket[Google Cloud documentation]. [[repository-gcs-service-authentication]] ===== Service Authentication -The plugin supports two authentication modes: - -* The built-in <>. This mode is -recommended if your Elasticsearch node is running on a Compute Engine virtual machine. - -* Specifying <> credentials. - -[[repository-gcs-using-compute-engine]] -===== Using Compute Engine -When running on Compute Engine, the plugin use Google's built-in authentication mechanism to -authenticate on the Storage service. Compute Engine virtual machines are usually associated to a -default service account. This service account can be found in the VM instance details in the -https://console.cloud.google.com/compute/[Compute Engine console]. - -This is the default authentication mode and requires no configuration. - -NOTE: The Compute Engine VM must be allowed to use the Storage service. This can be done only at VM -creation time, when "Storage" access can be configured to "Read/Write" permission. Check your -instance details at the section "Cloud API access scopes". +The plugin must authenticate the requests it makes to the Google Cloud Storage +service. It is common for Google client libraries to employ a strategy named https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application[application default credentials]. +However, that strategy is **not** supported for use with Elasticsearch. The +plugin operates under the Elasticsearch process, which runs with the security +manager enabled. The security manager obstructs the "automatic" credential discovery. +Therefore, you must configure <> +credentials even if you are using an environment that does not normally require +this configuration (such as Compute Engine, Kubernetes Engine or App Engine). [[repository-gcs-using-service-account]] ===== Using a Service Account -If your Elasticsearch node is not running on Compute Engine, or if you don't want to use Google's -built-in authentication mechanism, you can authenticate on the Storage service using a -https://cloud.google.com/iam/docs/overview#service_account[Service Account] file. +You have to obtain and provide https://cloud.google.com/iam/docs/overview#service_account[service account credentials] +manually. + +For detailed information about generating JSON service account files, see the https://cloud.google.com/storage/docs/authentication?hl=en#service_accounts[Google Cloud documentation]. +Note that the PKCS12 format is not supported by this plugin. -To create a service account file: +Here is a summary of the steps: -1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console] -2. Select your project -3. Got to the https://console.cloud.google.com/permissions[Permission] tab -4. 
Select the https://console.cloud.google.com/permissions/serviceaccounts[Service Accounts] tab -5. Click on "Create service account" -6. Once created, select the new service account and download a JSON key file +1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console]. +2. Select your project. +3. Got to the https://console.cloud.google.com/permissions[Permission] tab. +4. Select the https://console.cloud.google.com/permissions/serviceaccounts[Service Accounts] tab. +5. Click *Create service account*. +6. After the account is created, select it and download a JSON key file. -A service account file looks like this: +A JSON service account file looks like this: [source,js] ---- @@ -84,19 +79,26 @@ A service account file looks like this: "private_key_id": "...", "private_key": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n", "client_email": "service-account-for-your-repository@your-project-id.iam.gserviceaccount.com", - "client_id": "..." + "client_id": "...", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://accounts.google.com/o/oauth2/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/your-bucket@your-project-id.iam.gserviceaccount.com" } ---- // NOTCONSOLE -This file must be stored in the {ref}/secure-settings.html[elasticsearch keystore], under a setting name -of the form `gcs.client.NAME.credentials_file`, where `NAME` is the name of the client configuration. -The default client name is `default`, but a different client name can be specified in repository -settings using `client`. +To provide this file to the plugin, it must be stored in the {ref}/secure-settings.html[Elasticsearch keystore]. You must add a setting name of the form `gcs.client.NAME.credentials_file`, where `NAME` +is the name of the client configuration for the repository. The implicit client +name is `default`, but a different client name can be specified in the +repository settings with the `client` key. -For example, if specifying the credentials file in the keystore under -`gcs.client.my_alternate_client.credentials_file`, you can configure a repository to use these -credentials like this: +NOTE: Passing the file path via the GOOGLE_APPLICATION_CREDENTIALS environment +variable is **not** supported. + +For example, if you added a `gcs.client.my_alternate_client.credentials_file` +setting in the keystore, you can configure a repository to use those credentials +like this: [source,js] ---- @@ -113,19 +115,18 @@ PUT _snapshot/my_gcs_repository // TEST[skip:we don't have gcs setup while testing this] The `credentials_file` settings are {ref}/secure-settings.html#reloadable-secure-settings[reloadable]. -After you reload the settings, the internal `gcs` clients, used to transfer the -snapshot contents, will utilize the latest settings from the keystore. - +After you reload the settings, the internal `gcs` clients, which are used to +transfer the snapshot contents, utilize the latest settings from the keystore. -NOTE: In progress snapshot/restore jobs will not be preempted by a *reload* -of the client's `credentials_file` settings. They will complete using the client -as it was built when the operation started. +NOTE: Snapshot or restore jobs that are in progress are not preempted by a *reload* +of the client's `credentials_file` settings. They complete using the client as +it was built when the operation started. 
[[repository-gcs-client]] ==== Client Settings The client used to connect to Google Cloud Storage has a number of settings available. -Client setting names are of the form `gcs.client.CLIENT_NAME.SETTING_NAME` and specified +Client setting names are of the form `gcs.client.CLIENT_NAME.SETTING_NAME` and are specified inside `elasticsearch.yml`. The default client name looked up by a `gcs` repository is called `default`, but can be customized with the repository setting `client`. @@ -146,7 +147,7 @@ PUT _snapshot/my_gcs_repository // TEST[skip:we don't have gcs setup while testing this] Some settings are sensitive and must be stored in the -{ref}/secure-settings.html[elasticsearch keystore]. This is the case for the service account file: +{ref}/secure-settings.html[Elasticsearch keystore]. This is the case for the service account file: [source,sh] ---- @@ -185,7 +186,7 @@ are marked as `Secure`. `project_id`:: - The Google Cloud project id. This will be automatically infered from the credentials file but + The Google Cloud project id. This will be automatically inferred from the credentials file but can be specified explicitly. For example, it can be used to switch between projects when the same credentials are usable for both the production and the development projects. @@ -249,8 +250,8 @@ The following settings are supported: The service account used to access the bucket must have the "Writer" access to the bucket: -1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console] -2. Select your project -3. Got to the https://console.cloud.google.com/storage/browser[Storage Browser] -4. Select the bucket and "Edit bucket permission" -5. The service account must be configured as a "User" with "Writer" access +1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console]. +2. Select your project. +3. Got to the https://console.cloud.google.com/storage/browser[Storage Browser]. +4. Select the bucket and "Edit bucket permission". +5. The service account must be configured as a "User" with "Writer" access. From 49399b3551f8e6c58b5575e87667a63d71746d1c Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 30 Aug 2018 09:38:23 +0100 Subject: [PATCH 16/52] Fix docs build after #33241 Recently-merged PR #33241 broke the docs build, and this fixes it. --- docs/reference/modules/discovery/zen.asciidoc | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/docs/reference/modules/discovery/zen.asciidoc b/docs/reference/modules/discovery/zen.asciidoc index d90be42d9178a..e9be7aa52e890 100644 --- a/docs/reference/modules/discovery/zen.asciidoc +++ b/docs/reference/modules/discovery/zen.asciidoc @@ -68,14 +68,14 @@ startup. To enable file-based discovery, configure the `file` hosts provider as follows: -``` +[source,txt] +---------------------------------------------------------------- discovery.zen.hosts_provider: file -``` +---------------------------------------------------------------- -Then create a file at `$ES_PATH_CONF/unicast_hosts.txt` in -<>. Any time a change is made -to the `unicast_hosts.txt` file the new changes will be picked up by -Elasticsearch and the new hosts list will be used. +Then create a file at `$ES_PATH_CONF/unicast_hosts.txt` in the format described +below. Any time a change is made to the `unicast_hosts.txt` file the new +changes will be picked up by Elasticsearch and the new hosts list will be used. 
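As a purely illustrative sketch (hypothetical addresses), a `unicast_hosts.txt` file in the
format described below contains one node entry per line, each with an optional transport port:

[source,txt]
----------------------------------------------------------------
10.10.10.5
10.10.10.6:9305
----------------------------------------------------------------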
Note that the file-based discovery plugin augments the unicast hosts list in `elasticsearch.yml`: if there are valid unicast host entries in @@ -86,10 +86,6 @@ The `discovery.zen.ping.unicast.resolve_timeout` setting also applies to DNS lookups for nodes specified by address via file-based discovery. This is specified as a <> and defaults to 5s. -[[discovery-file-format]] -[float] -====== unicast_hosts.txt file format - The format of the file is to specify one node entry per line. Each node entry consists of the host (host name or IP address) and an optional transport port number. If the port number is specified, is must come immediately after the From 6a699ad7acc2062e8aa6bda8e8d2ac9c5f0926c9 Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Thu, 30 Aug 2018 11:41:39 +0300 Subject: [PATCH 17/52] Ignore module-info in jar hell checks (#33011) * Ignore module-info in JarHell checks * Add unit test * integration test to test that jarhell is ran with precommit --- buildSrc/build.gradle | 1 - .../gradle/precommit/JarHellTask.groovy | 8 +++- .../gradle/precommit/PrecommitTasks.groovy | 10 ++++- .../gradle/BuildExamplePluginsIT.java | 13 ------ ...portElasticsearchBuildResourcesTaskIT.java | 8 ++-- .../gradle/precommit/JarHellTaskIT.java | 42 +++++++++++++++++++ .../test/GradleIntegrationTestCase.java | 31 ++++++++++++-- buildSrc/src/testKit/jarHell/build.gradle | 29 +++++++++++++ .../java/org/apache/logging/log4j/Logger.java | 7 ++++ .../org/elasticsearch/bootstrap/JarHell.java | 4 ++ .../elasticsearch/bootstrap/JarHellTests.java | 22 ++++++++++ 11 files changed, 149 insertions(+), 26 deletions(-) create mode 100644 buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java create mode 100644 buildSrc/src/testKit/jarHell/build.gradle create mode 100644 buildSrc/src/testKit/jarHell/src/main/java/org/apache/logging/log4j/Logger.java diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 25d2a97302e91..dce14b10fcb8c 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -176,7 +176,6 @@ if (project != rootProject) { it.tasks.matching { it.name == 'publishNebulaPublicationToLocalTestRepository'} } exclude "**/*Tests.class" - include "**/*IT.class" testClassesDirs = sourceSets.test.output.classesDirs classpath = sourceSets.test.runtimeClasspath inputs.dir(file("src/testKit")) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy index 4299efd95a383..119a02764994c 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy @@ -22,8 +22,8 @@ package org.elasticsearch.gradle.precommit import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin import org.elasticsearch.gradle.LoggedExec import org.gradle.api.file.FileCollection +import org.gradle.api.tasks.Classpath import org.gradle.api.tasks.OutputFile - /** * Runs CheckJarHell on a classpath. */ @@ -35,9 +35,13 @@ public class JarHellTask extends LoggedExec { * inputs (ie the jars/class files). 
*/ @OutputFile - File successMarker = new File(project.buildDir, 'markers/jarHell') + File successMarker + + @Classpath + FileCollection classpath public JarHellTask() { + successMarker = new File(project.buildDir, 'markers/jarHell-' + getName()) project.afterEvaluate { FileCollection classpath = project.sourceSets.test.runtimeClasspath if (project.plugins.hasPlugin(ShadowPlugin)) { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index 60469622484e5..be7561853bbb2 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -31,7 +31,7 @@ class PrecommitTasks { /** Adds a precommit task, which depends on non-test verification tasks. */ public static Task create(Project project, boolean includeDependencyLicenses) { - Configuration forbiddenApisConfiguration = project.configurations.create("forbiddenApisCliJar") + project.configurations.create("forbiddenApisCliJar") project.dependencies { forbiddenApisCliJar ('de.thetaphi:forbiddenapis:2.5') } @@ -43,7 +43,7 @@ class PrecommitTasks { project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class), project.tasks.create('licenseHeaders', LicenseHeadersTask.class), project.tasks.create('filepermissions', FilePermissionsTask.class), - project.tasks.create('jarHell', JarHellTask.class), + configureJarHell(project), configureThirdPartyAudit(project) ] @@ -80,6 +80,12 @@ class PrecommitTasks { return project.tasks.create(precommitOptions) } + private static Task configureJarHell(Project project) { + Task task = project.tasks.create('jarHell', JarHellTask.class) + task.classpath = project.sourceSets.test.runtimeClasspath + return task + } + private static Task configureThirdPartyAudit(Project project) { ThirdPartyAuditTask thirdPartyAuditTask = project.tasks.create('thirdPartyAudit', ThirdPartyAuditTask.class) ExportElasticsearchBuildResourcesTask buildResources = project.tasks.getByName('buildResources') diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java index 3e18b0b80af3f..aca9906701150 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java @@ -153,17 +153,4 @@ private Path writeBuildScript(String script) { } } - private String getLocalTestRepoPath() { - String property = System.getProperty("test.local-test-repo-path"); - Objects.requireNonNull(property, "test.local-test-repo-path not passed to tests"); - File file = new File(property); - assertTrue("Expected " + property + " to exist, but it did not!", file.exists()); - if (File.separator.equals("\\")) { - // Use / on Windows too, the build script is not happy with \ - return file.getAbsolutePath().replace(File.separator, "/"); - } else { - return file.getAbsolutePath(); - } - } - } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java index 98fea2ea15ab6..99afd0bcbe0ae 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java +++ 
b/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java @@ -40,7 +40,7 @@ public void testUpToDateWithSourcesConfigured() { .withArguments("buildResources", "-s", "-i") .withPluginClasspath() .build(); - assertTaskSuccessfull(result, ":buildResources"); + assertTaskSuccessful(result, ":buildResources"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle.xml"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle_suppressions.xml"); @@ -61,8 +61,8 @@ public void testImplicitTaskDependencyCopy() { .withPluginClasspath() .build(); - assertTaskSuccessfull(result, ":buildResources"); - assertTaskSuccessfull(result, ":sampleCopyAll"); + assertTaskSuccessful(result, ":buildResources"); + assertTaskSuccessful(result, ":sampleCopyAll"); assertBuildFileExists(result, PROJECT_NAME, "sampleCopyAll/checkstyle.xml"); // This is a side effect of compile time reference assertBuildFileExists(result, PROJECT_NAME, "sampleCopyAll/checkstyle_suppressions.xml"); @@ -75,7 +75,7 @@ public void testImplicitTaskDependencyInputFileOfOther() { .withPluginClasspath() .build(); - assertTaskSuccessfull(result, ":sample"); + assertTaskSuccessful(result, ":sample"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle.xml"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle_suppressions.xml"); } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java new file mode 100644 index 0000000000000..03f2022bc66e8 --- /dev/null +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java @@ -0,0 +1,42 @@ +package org.elasticsearch.gradle.precommit; + +import org.elasticsearch.gradle.test.GradleIntegrationTestCase; +import org.gradle.testkit.runner.BuildResult; +import org.gradle.testkit.runner.GradleRunner; + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +public class JarHellTaskIT extends GradleIntegrationTestCase { + + public void testJarHellDetected() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("jarHell")) + .withArguments("clean", "precommit", "-s", "-Dlocal.repo.path=" + getLocalTestRepoPath()) + .withPluginClasspath() + .buildAndFail(); + + assertTaskFailed(result, ":jarHell"); + assertOutputContains( + result.getOutput(), + "Exception in thread \"main\" java.lang.IllegalStateException: jar hell!", + "class: org.apache.logging.log4j.Logger" + ); + } + +} diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java index f00ab406a6c10..a1d4b86ab760c 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java @@ -9,6 +9,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.List; +import java.util.Objects; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -66,15 +67,24 @@ protected void assertOutputDoesNotContain(String output, String... lines) { } } - protected void assertTaskSuccessfull(BuildResult result, String taskName) { + protected void assertTaskFailed(BuildResult result, String taskName) { + assertTaskOutcome(result, taskName, TaskOutcome.FAILED); + } + + protected void assertTaskSuccessful(BuildResult result, String taskName) { + assertTaskOutcome(result, taskName, TaskOutcome.SUCCESS); + } + + private void assertTaskOutcome(BuildResult result, String taskName, TaskOutcome taskOutcome) { BuildTask task = result.task(taskName); if (task == null) { - fail("Expected task `" + taskName + "` to be successful, but it did not run"); + fail("Expected task `" + taskName + "` to be " + taskOutcome +", but it did not run" + + "\n\nOutput is:\n" + result.getOutput()); } assertEquals( "Expected task to be successful but it was: " + task.getOutcome() + - "\n\nOutput is:\n" + result.getOutput() , - TaskOutcome.SUCCESS, + taskOutcome + "\n\nOutput is:\n" + result.getOutput() , + taskOutcome, task.getOutcome() ); } @@ -109,4 +119,17 @@ protected void assertBuildFileDoesNotExists(BuildResult result, String projectNa Files.exists(absPath) ); } + + protected String getLocalTestRepoPath() { + String property = System.getProperty("test.local-test-repo-path"); + Objects.requireNonNull(property, "test.local-test-repo-path not passed to tests"); + File file = new File(property); + assertTrue("Expected " + property + " to exist, but it did not!", file.exists()); + if (File.separator.equals("\\")) { + // Use / on Windows too, the build script is not happy with \ + return file.getAbsolutePath().replace(File.separator, "/"); + } else { + return file.getAbsolutePath(); + } + } } diff --git a/buildSrc/src/testKit/jarHell/build.gradle b/buildSrc/src/testKit/jarHell/build.gradle new file mode 100644 index 0000000000000..17ff43fc7403b --- /dev/null +++ b/buildSrc/src/testKit/jarHell/build.gradle @@ -0,0 +1,29 @@ +plugins { + id 'java' + id 'elasticsearch.build' +} + +dependencyLicenses.enabled = false +dependenciesInfo.enabled = false +forbiddenApisMain.enabled = false +forbiddenApisTest.enabled = false +thirdPartyAudit.enabled = false +namingConventions.enabled = false +ext.licenseFile = file("$buildDir/dummy/license") +ext.noticeFile = file("$buildDir/dummy/notice") + +repositories { + mavenCentral() + repositories { + maven { + 
url System.getProperty("local.repo.path") + } + } +} + +dependencies { + // Needed for the JarHell task + testCompile ("org.elasticsearch.test:framework:${versions.elasticsearch}") + // causes jar hell with local sources + compile "org.apache.logging.log4j:log4j-api:${versions.log4j}" +} diff --git a/buildSrc/src/testKit/jarHell/src/main/java/org/apache/logging/log4j/Logger.java b/buildSrc/src/testKit/jarHell/src/main/java/org/apache/logging/log4j/Logger.java new file mode 100644 index 0000000000000..a4332c664fa38 --- /dev/null +++ b/buildSrc/src/testKit/jarHell/src/main/java/org/apache/logging/log4j/Logger.java @@ -0,0 +1,7 @@ +package org.apache.logging.log4j; + +// Jar Hell ! +public class Logger { + +} + diff --git a/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java b/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java index e171daeb79b85..3de0ae5117e6a 100644 --- a/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java +++ b/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java @@ -255,6 +255,10 @@ public static void checkJavaVersion(String resource, String targetVersion) { } private static void checkClass(Map clazzes, String clazz, Path jarpath) { + if (clazz.equals("module-info") || clazz.endsWith(".module-info")) { + // Ignore jigsaw module descriptions + return; + } Path previous = clazzes.put(clazz, jarpath); if (previous != null) { if (previous.equals(jarpath)) { diff --git a/libs/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java b/libs/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java index e58268ef19251..95c56f94ee4e1 100644 --- a/libs/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java +++ b/libs/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java @@ -76,6 +76,28 @@ public void testDifferentJars() throws Exception { } } + public void testModuleInfo() throws Exception { + Path dir = createTempDir(); + JarHell.checkJarHell( + asSet( + makeJar(dir, "foo.jar", null, "module-info.class"), + makeJar(dir, "bar.jar", null, "module-info.class") + ), + logger::debug + ); + } + + public void testModuleInfoPackage() throws Exception { + Path dir = createTempDir(); + JarHell.checkJarHell( + asSet( + makeJar(dir, "foo.jar", null, "foo/bar/module-info.class"), + makeJar(dir, "bar.jar", null, "foo/bar/module-info.class") + ), + logger::debug + ); + } + public void testDirsOnClasspath() throws Exception { Path dir1 = createTempDir(); Path dir2 = createTempDir(); From e5f8a22e7e4f5875086af9ee98735d1244d6c21b Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Thu, 30 Aug 2018 10:53:01 +0200 Subject: [PATCH 18/52] Watcher: Ensure TriggerEngine start replaces existing watches (#33157) This commit ensures that when `TriggerService.start()` is called, we ensure in the trigger engine implementations that current watches are removed instead of adding to the existing ones in `TickerScheduleTriggerEngine.start()` Two additional minor fixes, where the result remains the same but less code gets executed. 1. If the node is not a data node, we forgot to set the status to STARTING when watcher is being started. This should not be a big issue, because a non-data node does not spent a lot of time loading as there are no watches which need loading. 2. If a new cluster state came in during a reload, we had two checks in place to abort loading the current one. The first one before we load all the watches of the local node and the second before watcher is starting with those new watches. 
Turned out that the first check was not returning, which meant we always tried to load all the watches, and then would fail on the second check. This has been fixed here. --- .../watcher/WatcherLifeCycleService.java | 1 + .../xpack/watcher/WatcherService.java | 10 +++-- .../engine/TickerScheduleTriggerEngine.java | 2 +- .../engine/TickerScheduleEngineTests.java | 44 ++++++++++++++++++- 4 files changed, 51 insertions(+), 6 deletions(-) diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java index 127425308b615..e2246214315b8 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java @@ -113,6 +113,7 @@ public void clusterChanged(ClusterChangedEvent event) { // if this is not a data node, we need to start it ourselves possibly if (event.state().nodes().getLocalNode().isDataNode() == false && isWatcherStoppedManually == false && this.state.get() == WatcherState.STOPPED) { + this.state.set(WatcherState.STARTING); watcherService.start(event.state(), () -> this.state.set(WatcherState.STARTED)); return; } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java index 49915674fe9e2..599287bb50a76 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java @@ -183,9 +183,6 @@ void reload(ClusterState state, String reason) { // by checking the cluster state version before and after loading the watches we can potentially just exit without applying the // changes processedClusterStateVersion.set(state.getVersion()); - triggerService.pauseExecution(); - int cancelledTaskCount = executionService.clearExecutionsAndQueue(); - logger.info("reloading watcher, reason [{}], cancelled [{}] queued tasks", reason, cancelledTaskCount); executor.execute(wrapWatcherService(() -> reloadInner(state, reason, false), e -> logger.error("error reloading watcher", e))); @@ -221,6 +218,7 @@ private synchronized boolean reloadInner(ClusterState state, String reason, bool if (processedClusterStateVersion.get() != state.getVersion()) { logger.debug("watch service has not been reloaded for state [{}], another reload for state [{}] in progress", state.getVersion(), processedClusterStateVersion.get()); + return false; } Collection watches = loadWatches(state); @@ -231,7 +229,13 @@ private synchronized boolean reloadInner(ClusterState state, String reason, bool // if we had another state coming in the meantime, we will not start the trigger engines with these watches, but wait // until the others are loaded + // also this is the place where we pause the trigger service execution and clear the current execution service, so that we make sure + // that existing executions finish, but no new ones are executed if (processedClusterStateVersion.get() == state.getVersion()) { + triggerService.pauseExecution(); + int cancelledTaskCount = executionService.clearExecutionsAndQueue(); + logger.info("reloading watcher, reason [{}], cancelled [{}] queued tasks", reason, cancelledTaskCount); + executionService.unPause(); triggerService.start(watches); if 
(triggeredWatches.isEmpty() == false) { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java index 05aa7cf302817..4c10f794880b9 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java @@ -56,7 +56,7 @@ public synchronized void start(Collection jobs) { schedules.put(job.id(), new ActiveSchedule(job.id(), trigger.getSchedule(), startTime)); } } - this.schedules.putAll(schedules); + this.schedules = schedules; } @Override diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java index 7949998867b48..6680b38ab94b3 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java @@ -35,7 +35,9 @@ import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.daily; import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.weekly; +import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.startsWith; import static org.joda.time.DateTimeZone.UTC; import static org.mockito.Mockito.mock; @@ -50,8 +52,12 @@ public void init() throws Exception { } private TriggerEngine createEngine() { - return new TickerScheduleTriggerEngine(Settings.EMPTY, - mock(ScheduleRegistry.class), clock); + Settings settings = Settings.EMPTY; + // having a low value here speeds up the tests tremendously, we still want to run with the defaults every now and then + if (usually()) { + settings = Settings.builder().put(TickerScheduleTriggerEngine.TICKER_INTERVAL_SETTING.getKey(), "10ms").build(); + } + return new TickerScheduleTriggerEngine(settings, mock(ScheduleRegistry.class), clock); } private void advanceClockIfNeeded(DateTime newCurrentDateTime) { @@ -104,6 +110,40 @@ public void accept(Iterable events) { assertThat(bits.cardinality(), is(count)); } + public void testStartClearsExistingSchedules() throws Exception { + final CountDownLatch latch = new CountDownLatch(1); + List firedWatchIds = new ArrayList<>(); + engine.register(new Consumer>() { + @Override + public void accept(Iterable events) { + for (TriggerEvent event : events) { + firedWatchIds.add(event.jobName()); + } + latch.countDown(); + } + }); + + int count = randomIntBetween(2, 5); + List watches = new ArrayList<>(); + for (int i = 0; i < count; i++) { + watches.add(createWatch(String.valueOf(i), interval("1s"))); + } + engine.start(watches); + + watches.clear(); + for (int i = 0; i < count; i++) { + watches.add(createWatch("another_id" + i, interval("1s"))); + } + engine.start(watches); + + advanceClockIfNeeded(new DateTime(clock.millis(), UTC).plusMillis(1100)); + if (!latch.await(3 * count, TimeUnit.SECONDS)) { + fail("waiting too long for all watches to be triggered"); + } 
+ + assertThat(firedWatchIds, everyItem(startsWith("another_id"))); + } + public void testAddHourly() throws Exception { final String name = "job_name"; final CountDownLatch latch = new CountDownLatch(1); From ffbf40a53ae4206f9dc84d721bb0587feac28851 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Thu, 30 Aug 2018 13:59:19 +0300 Subject: [PATCH 19/52] [DOCS] TLS file resources are reloadable (#33258) Make clearer that file resources that are used as key trust material are polled and will be reloaded upon modification. --- .../securing-communications/tls-http.asciidoc | 12 +++++++++++- .../securing-communications/tls-transport.asciidoc | 12 +++++++++++- 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/x-pack/docs/en/security/securing-communications/tls-http.asciidoc b/x-pack/docs/en/security/securing-communications/tls-http.asciidoc index eb8e985a65b59..06e70b036735e 100644 --- a/x-pack/docs/en/security/securing-communications/tls-http.asciidoc +++ b/x-pack/docs/en/security/securing-communications/tls-http.asciidoc @@ -77,7 +77,17 @@ bin/elasticsearch-keystore add xpack.security.http.ssl.secure_key_passphrase . Restart {es}. -NOTE: All TLS-related node settings are considered to be highly sensitive and +[NOTE] +=============================== +* All TLS-related node settings are considered to be highly sensitive and therefore are not exposed via the {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API] For more information about any of these settings, see <>. + +* {es} monitors all files such as certificates, keys, keystores, or truststores +that are configured as values of TLS-related node settings. If you update any of +these files (for example, when your hostnames change or your certificates are +due to expire), {es} reloads them. The files are polled for changes at +a frequency determined by the global {es} `resource.reload.interval.high` +setting, which defaults to 5 seconds. +=============================== diff --git a/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc b/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc index c186aebbe2433..c2306545536aa 100644 --- a/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc +++ b/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc @@ -95,7 +95,17 @@ vice-versa). After enabling TLS you must restart all nodes in order to maintain communication across the cluster. -- -NOTE: All TLS-related node settings are considered to be highly sensitive and +[NOTE] +=============================== +* All TLS-related node settings are considered to be highly sensitive and therefore are not exposed via the {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API] For more information about any of these settings, see <>. + +* {es} monitors all files such as certificates, keys, keystores, or truststores +that are configured as values of TLS-related node settings. If you update any of +these files (for example, when your hostnames change or your certificates are +due to expire), {es} reloads them. The files are polled for changes at +a frequency determined by the global {es} `resource.reload.interval.high` +setting, which defaults to 5 seconds. 
+=============================== From 32b63fd5691008042609e508344114652bee8e96 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Thu, 30 Aug 2018 15:15:50 +0200 Subject: [PATCH 20/52] Fix nested _source retrieval with includes/excludes (#33180) If an exclude or an include clause removes an entry to a nested field in the original source at query time, the creation of nested hits fails with an NPE. This change fixes this exception and replaces the nested document source with an empty map. Closes #33163 Closes #33170 --- .../fetch/subphase/FetchSourceSubPhase.java | 4 ++ .../subphase/FetchSourceSubPhaseTests.java | 40 ++++++++++++++++++- 2 files changed, 42 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java index 2da74c56f6a33..a7f333abfa2ef 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java @@ -57,6 +57,7 @@ public void hitExecute(SearchContext context, HitContext hitContext) { if (nestedHit) { value = getNestedSource((Map) value, hitContext); } + try { final int initialCapacity = nestedHit ? 1024 : Math.min(1024, source.internalSourceRef().length()); BytesStreamOutput streamOutput = new BytesStreamOutput(initialCapacity); @@ -81,6 +82,9 @@ public void hitExecute(SearchContext context, HitContext hitContext) { private Map getNestedSource(Map sourceAsMap, HitContext hitContext) { for (SearchHit.NestedIdentity o = hitContext.hit().getNestedIdentity(); o != null; o = o.getChild()) { sourceAsMap = (Map) sourceAsMap.get(o.getField().string()); + if (sourceAsMap == null) { + return null; + } } return sourceAsMap; } diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java index 5cc4e2ddc68a7..7790e8d6576ca 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java @@ -34,6 +34,7 @@ import java.io.IOException; import java.util.Collections; +import java.util.Map; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -78,6 +79,29 @@ public void testMultipleFiltering() throws IOException { assertEquals(Collections.singletonMap("field","value"), hitContext.hit().getSourceAsMap()); } + public void testNestedSource() throws IOException { + Map expectedNested = Collections.singletonMap("nested2", Collections.singletonMap("field", "value0")); + XContentBuilder source = XContentFactory.jsonBuilder().startObject() + .field("field", "value") + .field("field2", "value2") + .field("nested1", expectedNested) + .endObject(); + FetchSubPhase.HitContext hitContext = hitExecuteMultiple(source, true, null, null, + new SearchHit.NestedIdentity("nested1", 0,null)); + assertEquals(expectedNested, hitContext.hit().getSourceAsMap()); + hitContext = hitExecuteMultiple(source, true, new String[]{"invalid"}, null, + new SearchHit.NestedIdentity("nested1", 0,null)); + assertEquals(Collections.emptyMap(), hitContext.hit().getSourceAsMap()); + + hitContext = hitExecuteMultiple(source, true, null, null, + new SearchHit.NestedIdentity("nested1", 0, new SearchHit.NestedIdentity("nested2", 0, null))); + 
assertEquals(Collections.singletonMap("field", "value0"), hitContext.hit().getSourceAsMap()); + + hitContext = hitExecuteMultiple(source, true, new String[]{"invalid"}, null, + new SearchHit.NestedIdentity("nested1", 0, new SearchHit.NestedIdentity("nested2", 0, null))); + assertEquals(Collections.emptyMap(), hitContext.hit().getSourceAsMap()); + } + public void testSourceDisabled() throws IOException { FetchSubPhase.HitContext hitContext = hitExecute(null, true, null, null); assertNull(hitContext.hit().getSourceAsMap()); @@ -96,17 +120,29 @@ public void testSourceDisabled() throws IOException { } private FetchSubPhase.HitContext hitExecute(XContentBuilder source, boolean fetchSource, String include, String exclude) { + return hitExecute(source, fetchSource, include, exclude, null); + } + + + private FetchSubPhase.HitContext hitExecute(XContentBuilder source, boolean fetchSource, String include, String exclude, + SearchHit.NestedIdentity nestedIdentity) { return hitExecuteMultiple(source, fetchSource, include == null ? Strings.EMPTY_ARRAY : new String[]{include}, - exclude == null ? Strings.EMPTY_ARRAY : new String[]{exclude}); + exclude == null ? Strings.EMPTY_ARRAY : new String[]{exclude}, nestedIdentity); } private FetchSubPhase.HitContext hitExecuteMultiple(XContentBuilder source, boolean fetchSource, String[] includes, String[] excludes) { + return hitExecuteMultiple(source, fetchSource, includes, excludes, null); + } + + private FetchSubPhase.HitContext hitExecuteMultiple(XContentBuilder source, boolean fetchSource, String[] includes, String[] excludes, + SearchHit.NestedIdentity nestedIdentity) { FetchSourceContext fetchSourceContext = new FetchSourceContext(fetchSource, includes, excludes); SearchContext searchContext = new FetchSourceSubPhaseTestSearchContext(fetchSourceContext, source == null ? null : BytesReference.bytes(source)); FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext(); - hitContext.reset(new SearchHit(1, null, null, null), null, 1, null); + final SearchHit searchHit = new SearchHit(1, null, null, nestedIdentity, null); + hitContext.reset(searchHit, null, 1, null); FetchSourceSubPhase phase = new FetchSourceSubPhase(); phase.hitExecute(searchContext, hitContext); return hitContext; From 34f13f88c4f36f6945900da15275d24d500023e4 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Thu, 30 Aug 2018 18:07:58 +0200 Subject: [PATCH 21/52] Fix serialization of empty field capabilities response (#33263) Fix serialization of empty field capabilities response When no response are required (no indices match the requested patterns) the empty response throws an NPE in the transport serialization (writeTo). 
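For context, the failure described here can be hit with a field capabilities request whose
index pattern matches no indices, for example (illustrative request only):

    GET does_not_exist*/_field_caps?fields=rating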
--- .../action/fieldcaps/FieldCapabilities.java | 4 +- .../fieldcaps/FieldCapabilitiesRequest.java | 1 - .../fieldcaps/FieldCapabilitiesResponse.java | 8 +-- .../TransportFieldCapabilitiesAction.java | 2 +- .../java/org/elasticsearch/client/Client.java | 2 +- .../client/support/AbstractClient.java | 4 +- .../FieldCapabilitiesResponseTests.java | 53 +++++++++++++++---- 7 files changed, 54 insertions(+), 20 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java index 21bb452430e7a..5cfdba9294634 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java @@ -166,14 +166,14 @@ public String getName() { } /** - * Whether this field is indexed for search on all indices. + * Whether this field can be aggregated on all indices. */ public boolean isAggregatable() { return isAggregatable; } /** - * Whether this field can be aggregated on all indices. + * Whether this field is indexed for search on all indices. */ public boolean isSearchable() { return isSearchable; diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java index e91d9a703f491..c61f16962e678 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java @@ -123,7 +123,6 @@ public String[] fields() { } /** - * * The list of indices to lookup */ public FieldCapabilitiesRequest indices(String... indices) { diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java index 959b4e572b714..806bfad77c05f 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java @@ -36,6 +36,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.stream.Collectors; /** @@ -57,15 +58,15 @@ public class FieldCapabilitiesResponse extends ActionResponse implements ToXCont private FieldCapabilitiesResponse(Map> responseMap, List indexResponses) { - this.responseMap = responseMap; - this.indexResponses = indexResponses; + this.responseMap = Objects.requireNonNull(responseMap); + this.indexResponses = Objects.requireNonNull(indexResponses); } /** * Used for serialization */ FieldCapabilitiesResponse() { - this.responseMap = Collections.emptyMap(); + this(Collections.emptyMap(), Collections.emptyList()); } /** @@ -82,6 +83,7 @@ public Map> get() { List getIndexResponses() { return indexResponses; } + /** * * Get the field capabilities per type for the provided {@code field}. 
diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java index 2adea56730ee4..cf4515c5feb64 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java @@ -89,7 +89,7 @@ protected void doExecute(FieldCapabilitiesRequest request, } }; if (totalNumRequest == 0) { - listener.onResponse(new FieldCapabilitiesResponse()); + listener.onResponse(new FieldCapabilitiesResponse(Collections.emptyMap())); } else { ActionListener innerListener = new ActionListener() { @Override diff --git a/server/src/main/java/org/elasticsearch/client/Client.java b/server/src/main/java/org/elasticsearch/client/Client.java index adb2f509b999e..f97f618347af5 100644 --- a/server/src/main/java/org/elasticsearch/client/Client.java +++ b/server/src/main/java/org/elasticsearch/client/Client.java @@ -455,7 +455,7 @@ public interface Client extends ElasticsearchClient, Releasable { /** * Builder for the field capabilities request. */ - FieldCapabilitiesRequestBuilder prepareFieldCaps(); + FieldCapabilitiesRequestBuilder prepareFieldCaps(String... indices); /** * An action that returns the field capabilities from the provided request diff --git a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java index f2bfe38d81097..189017dfa606f 100644 --- a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -657,8 +657,8 @@ public ActionFuture fieldCaps(FieldCapabilitiesReques } @Override - public FieldCapabilitiesRequestBuilder prepareFieldCaps() { - return new FieldCapabilitiesRequestBuilder(this, FieldCapabilitiesAction.INSTANCE); + public FieldCapabilitiesRequestBuilder prepareFieldCaps(String... 
indices) { + return new FieldCapabilitiesRequestBuilder(this, FieldCapabilitiesAction.INSTANCE, indices); } static class Admin implements AdminClient { diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java index b38240632421a..90b730660ddd9 100644 --- a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java @@ -28,11 +28,15 @@ import org.elasticsearch.test.AbstractStreamableXContentTestCase; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.function.Predicate; +import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiLettersOfLength; + public class FieldCapabilitiesResponseTests extends AbstractStreamableXContentTestCase { @@ -48,22 +52,46 @@ protected FieldCapabilitiesResponse createBlankInstance() { @Override protected FieldCapabilitiesResponse createTestInstance() { - Map> responses = new HashMap<>(); + if (randomBoolean()) { + // merged responses + Map> responses = new HashMap<>(); + + String[] fields = generateRandomStringArray(5, 10, false, true); + assertNotNull(fields); + + for (String field : fields) { + Map typesToCapabilities = new HashMap<>(); + String[] types = generateRandomStringArray(5, 10, false, false); + assertNotNull(types); + + for (String type : types) { + typesToCapabilities.put(type, FieldCapabilitiesTests.randomFieldCaps(field)); + } + responses.put(field, typesToCapabilities); + } + return new FieldCapabilitiesResponse(responses); + } else { + // non-merged responses + List responses = new ArrayList<>(); + int numResponse = randomIntBetween(0, 10); + for (int i = 0; i < numResponse; i++) { + responses.add(createRandomIndexResponse()); + } + return new FieldCapabilitiesResponse(responses); + } + } + + + private FieldCapabilitiesIndexResponse createRandomIndexResponse() { + Map responses = new HashMap<>(); String[] fields = generateRandomStringArray(5, 10, false, true); assertNotNull(fields); for (String field : fields) { - Map typesToCapabilities = new HashMap<>(); - String[] types = generateRandomStringArray(5, 10, false, false); - assertNotNull(types); - - for (String type : types) { - typesToCapabilities.put(type, FieldCapabilitiesTests.randomFieldCaps(field)); - } - responses.put(field, typesToCapabilities); + responses.put(field, FieldCapabilitiesTests.randomFieldCaps(field)); } - return new FieldCapabilitiesResponse(responses); + return new FieldCapabilitiesIndexResponse(randomAsciiLettersOfLength(10), responses); } @Override @@ -138,6 +166,11 @@ public void testToXContent() throws IOException { "}").replaceAll("\\s+", ""), generatedResponse); } + public void testEmptyResponse() throws IOException { + FieldCapabilitiesResponse testInstance = new FieldCapabilitiesResponse(); + assertSerialization(testInstance); + } + private static FieldCapabilitiesResponse createSimpleResponse() { Map titleCapabilities = new HashMap<>(); titleCapabilities.put("text", new FieldCapabilities("title", "text", true, false)); From 0a433479d68a796328e4c4888c1f2e8661af41d6 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 30 Aug 2018 14:26:32 -0400 Subject: [PATCH 22/52] TEST: Mute testMonitorClusterHealth Tracked at #32299 --- 
.../org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java index f56f96efc7883..f7ecb6d58e522 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java +++ b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java @@ -106,6 +106,7 @@ protected Settings restAdminSettings() { return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32299") public void testMonitorClusterHealth() throws Exception { String watchId = "cluster_health_watch"; From 61e0ce705424e986e7a43d5000674fc5b1cb77d0 Mon Sep 17 00:00:00 2001 From: Costin Leau Date: Fri, 31 Aug 2018 00:13:03 +0300 Subject: [PATCH 23/52] SQL: prevent duplicate generation for repeated aggs (#33252) Prevent generation of duplicate aggs caused by repetitive functions, leading to invalid query. Fix #30287 (cherry picked from commit 83c3d7a6cfbb469888bdf1429489a7dc07db6020) --- .../org/elasticsearch/xpack/sql/querydsl/agg/Aggs.java | 3 +++ x-pack/qa/sql/src/main/resources/agg.sql-spec | 10 +++++++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/Aggs.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/Aggs.java index 5fb8a754f0f54..b8faedec71878 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/Aggs.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/Aggs.java @@ -112,6 +112,9 @@ public Aggs addGroups(Collection groups) { } public Aggs addAgg(LeafAgg agg) { + if (metricAggs.contains(agg)) { + return this; + } return new Aggs(groups, combine(metricAggs, agg), pipelineAggs); } diff --git a/x-pack/qa/sql/src/main/resources/agg.sql-spec b/x-pack/qa/sql/src/main/resources/agg.sql-spec index f42ce0ef7a092..f1ab9160b1af4 100644 --- a/x-pack/qa/sql/src/main/resources/agg.sql-spec +++ b/x-pack/qa/sql/src/main/resources/agg.sql-spec @@ -394,4 +394,12 @@ SELECT MIN(salary) min, MAX(salary) max, gender g, languages l, COUNT(*) c FROM aggMultiWithHavingOnCount SELECT MIN(salary) min, MAX(salary) max, gender g, COUNT(*) c FROM "test_emp" WHERE languages > 0 GROUP BY g HAVING c > 40 ORDER BY gender; aggMultiGroupByMultiWithHavingOnCount -SELECT MIN(salary) min, MAX(salary) max, gender g, languages l, COUNT(*) c FROM "test_emp" WHERE languages > 0 GROUP BY g, languages HAVING c > 40 ORDER BY gender, languages; \ No newline at end of file +SELECT MIN(salary) min, MAX(salary) max, gender g, languages l, COUNT(*) c FROM "test_emp" WHERE languages > 0 GROUP BY g, languages HAVING c > 40 ORDER BY gender, languages; + +// repetion of same aggs to check whether the generated query contains duplicates or not +aggRepeatFunctionAcrossFields +SELECT MIN(emp_no) AS a, 1 + MIN(emp_no) AS b, ABS(MIN(emp_no)) AS c FROM test_emp; +aggRepeatFunctionBetweenSelectAndHaving +SELECT gender, COUNT(DISTINCT languages) AS c FROM test_emp GROUP BY gender HAVING count(DISTINCT languages) > 0 ORDER BY gender; + + From 1319b402916e8895ac99d61003e27ffe37c46e03 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Thu, 30 Aug 2018 14:33:32 
-0700 Subject: [PATCH 24/52] Painless: Fix Bindings Bug (#33274) When the change was made to the format for in the whitelist for bindings, parameters from both the constructor and the method were combined into a single list instead of separate lists. The check for method parameters was being executed from the start of the combined list rather than the correct position. The tests for bindings used a constructor and a method that only used the int types so this was not caught. The test has been changed to also use a double type and this issue is fixed. --- .../main/java/org/elasticsearch/painless/BindingTest.java | 4 ++-- .../painless/lookup/PainlessLookupBuilder.java | 2 +- .../org/elasticsearch/painless/spi/org.elasticsearch.txt | 2 +- .../test/java/org/elasticsearch/painless/BindingsTests.java | 6 +++--- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/BindingTest.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/BindingTest.java index 1dcbce037b264..fc2a10891f623 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/BindingTest.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/BindingTest.java @@ -26,7 +26,7 @@ public BindingTest(int state0, int state1) { this.state = state0 + state1; } - public int testAddWithState(int stateless) { - return stateless + state; + public int testAddWithState(int istateless, double dstateless) { + return istateless + state + (int)dstateless; } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java index 7adc816252059..a64814f866113 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java @@ -908,7 +908,7 @@ public void addPainlessBinding(Class targetClass, String methodName, Class int methodTypeParametersSize = javaMethod.getParameterCount(); for (int typeParameterIndex = 0; typeParameterIndex < methodTypeParametersSize; ++typeParameterIndex) { - Class typeParameter = typeParameters.get(typeParameterIndex); + Class typeParameter = typeParameters.get(constructorTypeParametersSize + typeParameterIndex); if (isValidType(typeParameter) == false) { throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] not found " + diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt index b74720b2d61f2..853a48c918e20 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt @@ -181,5 +181,5 @@ class org.elasticsearch.painless.FeatureTest no_import { # for testing static { - int testAddWithState(int, int, int) bound_to org.elasticsearch.painless.BindingTest + int testAddWithState(int, int, int, double) bound_to org.elasticsearch.painless.BindingTest } \ No newline at end of file diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BindingsTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BindingsTests.java index c6d4e1974c14b..4bcc557d3dcff 100644 
--- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BindingsTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BindingsTests.java @@ -28,11 +28,11 @@ public class BindingsTests extends ScriptTestCase { public void testBasicBinding() { - assertEquals(15, exec("testAddWithState(4, 5, 6)")); + assertEquals(15, exec("testAddWithState(4, 5, 6, 0.0)")); } public void testRepeatedBinding() { - String script = "testAddWithState(4, 5, params.test)"; + String script = "testAddWithState(4, 5, params.test, 0.0)"; Map params = new HashMap<>(); ExecutableScript.Factory factory = scriptEngine.compile(null, script, ExecutableScript.CONTEXT, Collections.emptyMap()); ExecutableScript executableScript = factory.newInstance(params); @@ -48,7 +48,7 @@ public void testRepeatedBinding() { } public void testBoundBinding() { - String script = "testAddWithState(4, params.bound, params.test)"; + String script = "testAddWithState(4, params.bound, params.test, 0.0)"; Map params = new HashMap<>(); ExecutableScript.Factory factory = scriptEngine.compile(null, script, ExecutableScript.CONTEXT, Collections.emptyMap()); ExecutableScript executableScript = factory.newInstance(params); From df45716c129c1769723ef0fbddecd795201d5dc8 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Fri, 31 Aug 2018 01:27:00 +0530 Subject: [PATCH 25/52] Replace IndexMetaData.Custom with Map-based custom metadata (#32749) This PR removes the deprecated `Custom` class in `IndexMetaData`, in favor of a `Map` that is used to store custom index metadata. As part of this, there is now no way to set this metadata in a template or create index request (since it's only set by plugins, or dedicated REST endpoints). The `Map` is intended to be a namespaced `Map` (`DiffableStringMap` implements `Map`, so the signature is more like `Map>`). This is so we can do things like: ``` java Map ccrMeta = indexMetaData.getCustom("ccr"); ``` And then have complete control over the metadata. This also means any plugin/feature that uses this has to manage its own BWC, as the map is just serialized as a map. It also means that if metadata is put in the map that isn't used (for instance, if a plugin were removed), it causes no failures the way an unregistered `Setting` would. The reason I use a custom `DiffableStringMap` here rather than a plain `Map` is so the map can be diffed with previous cluster state updates for serialization. 
Supersedes #32683 --- .../CreateIndexClusterStateUpdateRequest.java | 11 - .../indices/create/CreateIndexRequest.java | 49 +---- .../create/CreateIndexRequestBuilder.java | 9 - .../create/TransportCreateIndexAction.java | 2 +- .../indices/shrink/TransportResizeAction.java | 1 - .../template/put/PutIndexTemplateRequest.java | 44 +--- .../put/TransportPutIndexTemplateAction.java | 1 - .../cluster/metadata/DiffableStringMap.java | 188 ++++++++++++++++++ .../cluster/metadata/IndexMetaData.java | 153 +++++--------- .../metadata/IndexTemplateMetaData.java | 79 ++------ .../metadata/MetaDataCreateIndexService.java | 21 +- .../MetaDataIndexTemplateService.java | 9 - .../metadata/DiffableStringMapTests.java | 103 ++++++++++ .../metadata/IndexCreationTaskTests.java | 42 +--- .../cluster/metadata/IndexMetaDataTests.java | 18 +- .../metadata/IndexTemplateMetaDataTests.java | 4 +- .../authz/store/NativeRolesStoreTests.java | 2 +- 17 files changed, 404 insertions(+), 332 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java create mode 100644 server/src/test/java/org/elasticsearch/cluster/metadata/DiffableStringMapTests.java diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java index ce77790637c8d..96db0dbc95ac1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java @@ -56,8 +56,6 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ private final Set aliases = new HashSet<>(); - private final Map customs = new HashMap<>(); - private final Set blocks = new HashSet<>(); private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; @@ -86,11 +84,6 @@ public CreateIndexClusterStateUpdateRequest aliases(Set aliases) { return this; } - public CreateIndexClusterStateUpdateRequest customs(Map customs) { - this.customs.putAll(customs); - return this; - } - public CreateIndexClusterStateUpdateRequest blocks(Set blocks) { this.blocks.addAll(blocks); return this; @@ -149,10 +142,6 @@ public Set aliases() { return aliases; } - public Map customs() { - return customs; - } - public Set blocks() { return blocks; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index 3dec708bfae2a..8858d0ad7609b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -29,7 +29,6 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -87,8 +86,6 @@ public class CreateIndexRequest extends AcknowledgedRequest private final Set aliases = new HashSet<>(); - private final Map customs = new HashMap<>(); - private boolean updateAllTypes = false; private ActiveShardCount 
waitForActiveShards = ActiveShardCount.DEFAULT; @@ -397,16 +394,7 @@ public CreateIndexRequest source(Map source, DeprecationHandler depre found = true; aliases((Map) entry.getValue()); } else { - // maybe custom? - IndexMetaData.Custom proto = IndexMetaData.lookupPrototype(name); - if (proto != null) { - found = true; - try { - customs.put(name, proto.fromMap((Map) entry.getValue())); - } catch (IOException e) { - throw new ElasticsearchParseException("failed to parse custom metadata for [{}]", name); - } - } + throw new ElasticsearchParseException("unknown key [{}] for create index", name); } } if (!found) { @@ -424,18 +412,6 @@ public Set aliases() { return this.aliases; } - /** - * Adds custom metadata to the index to be created. - */ - public CreateIndexRequest custom(IndexMetaData.Custom custom) { - customs.put(custom.type(), custom); - return this; - } - - public Map customs() { - return this.customs; - } - /** True if all fields that span multiple types should be updated, false otherwise */ public boolean updateAllTypes() { return updateAllTypes; @@ -498,11 +474,13 @@ public void readFrom(StreamInput in) throws IOException { } mappings.put(type, source); } - int customSize = in.readVInt(); - for (int i = 0; i < customSize; i++) { - String type = in.readString(); - IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupPrototypeSafe(type).readFrom(in); - customs.put(type, customIndexMetaData); + if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + // This used to be the size of custom metadata classes + int customSize = in.readVInt(); + assert customSize == 0 : "unexpected custom metadata when none is supported"; + if (customSize > 0) { + throw new IllegalStateException("unexpected custom metadata when none is supported"); + } } int aliasesSize = in.readVInt(); for (int i = 0; i < aliasesSize; i++) { @@ -523,10 +501,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(entry.getKey()); out.writeString(entry.getValue()); } - out.writeVInt(customs.size()); - for (Map.Entry entry : customs.entrySet()) { - out.writeString(entry.getKey()); - entry.getValue().writeTo(out); + if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + // Size of custom index metadata, which is removed + out.writeVInt(0); } out.writeVInt(aliases.size()); for (Alias alias : aliases) { @@ -562,10 +539,6 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t alias.toXContent(builder, params); } builder.endObject(); - - for (Map.Entry entry : customs.entrySet()) { - builder.field(entry.getKey(), entry.getValue(), params); - } return builder; } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java index 62eef0ff04a25..b59b1928c41d2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java @@ -23,7 +23,6 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; @@ -224,14 +223,6 @@ 
public CreateIndexRequestBuilder setSource(Map source) { return this; } - /** - * Adds custom metadata to the index to be created. - */ - public CreateIndexRequestBuilder addCustom(IndexMetaData.Custom custom) { - request.custom(custom); - return this; - } - /** * Sets the settings and mappings as a single source. */ diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java index 372c2eb861237..1fe483d068168 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -75,7 +75,7 @@ protected void masterOperation(final CreateIndexRequest request, final ClusterSt final CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest(request, cause, indexName, request.index(), request.updateAllTypes()) .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) .settings(request.settings()).mappings(request.mappings()) - .aliases(request.aliases()).customs(request.customs()) + .aliases(request.aliases()) .waitForActiveShards(request.waitForActiveShards()); createIndexService.createIndex(updateRequest, ActionListener.wrap(response -> diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java index f23dafdb5e873..08417ea5feb70 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java @@ -180,7 +180,6 @@ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final Resi .masterNodeTimeout(targetIndex.masterNodeTimeout()) .settings(targetIndex.settings()) .aliases(targetIndex.aliases()) - .customs(targetIndex.customs()) .waitForActiveShards(targetIndex.waitForActiveShards()) .recoverFrom(metaData.getIndex()) .resizeType(resizeRequest.getResizeType()) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java index 5d4e558dbb25b..1026c6dca3ac6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java @@ -27,7 +27,6 @@ import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeRequest; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -88,8 +87,6 @@ public class PutIndexTemplateRequest extends MasterNodeRequest aliases = new HashSet<>(); - private Map customs = new HashMap<>(); - private Integer version; public PutIndexTemplateRequest() { @@ -353,15 +350,7 @@ public PutIndexTemplateRequest source(Map templateSource) { } else if (name.equals("aliases")) { aliases((Map) entry.getValue()); } else { - // maybe custom? 
- IndexMetaData.Custom proto = IndexMetaData.lookupPrototype(name); - if (proto != null) { - try { - customs.put(name, proto.fromMap((Map) entry.getValue())); - } catch (IOException e) { - throw new ElasticsearchParseException("failed to parse custom metadata for [{}]", name); - } - } + throw new ElasticsearchParseException("unknown key [{}] in the template ", name); } } return this; @@ -395,15 +384,6 @@ public PutIndexTemplateRequest source(BytesReference source, XContentType xConte return source(XContentHelper.convertToMap(source, true, xContentType).v2()); } - public PutIndexTemplateRequest custom(IndexMetaData.Custom custom) { - customs.put(custom.type(), custom); - return this; - } - - public Map customs() { - return this.customs; - } - public Set aliases() { return this.aliases; } @@ -500,11 +480,13 @@ public void readFrom(StreamInput in) throws IOException { } mappings.put(type, mappingSource); } - int customSize = in.readVInt(); - for (int i = 0; i < customSize; i++) { - String type = in.readString(); - IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupPrototypeSafe(type).readFrom(in); - customs.put(type, customIndexMetaData); + if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + // Used to be used for custom index metadata + int customSize = in.readVInt(); + assert customSize == 0 : "expected not to have any custom metadata"; + if (customSize > 0) { + throw new IllegalStateException("unexpected custom metadata when none is supported"); + } } int aliasesSize = in.readVInt(); for (int i = 0; i < aliasesSize; i++) { @@ -531,10 +513,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(entry.getKey()); out.writeString(entry.getValue()); } - out.writeVInt(customs.size()); - for (Map.Entry entry : customs.entrySet()) { - out.writeString(entry.getKey()); - entry.getValue().writeTo(out); + if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + out.writeVInt(0); } out.writeVInt(aliases.size()); for (Alias alias : aliases) { @@ -570,10 +550,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } builder.endObject(); - for (Map.Entry entry : customs.entrySet()) { - builder.field(entry.getKey(), entry.getValue(), params); - } - return builder; } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java index bd8621a1a7d6f..34eccbf9d8a40 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java @@ -84,7 +84,6 @@ protected void masterOperation(final PutIndexTemplateRequest request, final Clus .settings(templateSettingsBuilder.build()) .mappings(request.mappings()) .aliases(request.aliases()) - .customs(request.customs()) .create(request.create()) .masterTimeout(request.masterNodeTimeout()) .version(request.version()), diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java new file mode 100644 index 0000000000000..4aa429f570499 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java @@ -0,0 +1,188 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.Diffable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.AbstractMap; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +/** + * This is a {@code Map} that implements AbstractDiffable so it + * can be used for cluster state purposes + */ +public class DiffableStringMap extends AbstractMap implements Diffable { + + private final Map innerMap; + + DiffableStringMap(final Map map) { + this.innerMap = map; + } + + @SuppressWarnings("unchecked") + DiffableStringMap(final StreamInput in) throws IOException { + this.innerMap = (Map) (Map) in.readMap(); + } + + @Override + public String put(String key, String value) { + return innerMap.put(key, value); + } + + @Override + public Set> entrySet() { + return innerMap.entrySet(); + } + + @Override + @SuppressWarnings("unchecked") + public void writeTo(StreamOutput out) throws IOException { + out.writeMap((Map) (Map) innerMap); + } + + @Override + public Diff diff(DiffableStringMap previousState) { + return new DiffableStringMapDiff(previousState, this); + } + + public static Diff readDiffFrom(StreamInput in) throws IOException { + return new DiffableStringMapDiff(in); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj instanceof DiffableStringMap) { + DiffableStringMap other = (DiffableStringMap) obj; + return innerMap.equals(other.innerMap); + } else if (obj instanceof Map) { + Map other = (Map) obj; + return innerMap.equals(other); + } else { + return false; + } + } + + @Override + public int hashCode() { + return innerMap.hashCode(); + } + + @Override + public String toString() { + return "DiffableStringMap[" + innerMap.toString() + "]"; + } + + /** + * Represents differences between two DiffableStringMaps. 
+ */ + public static class DiffableStringMapDiff implements Diff { + + private final List deletes; + private final Map upserts; // diffs also become upserts + + private DiffableStringMapDiff(DiffableStringMap before, DiffableStringMap after) { + final List tempDeletes = new ArrayList<>(); + final Map tempUpserts = new HashMap<>(); + for (String key : before.keySet()) { + if (after.containsKey(key) == false) { + tempDeletes.add(key); + } + } + + for (Map.Entry partIter : after.entrySet()) { + String beforePart = before.get(partIter.getKey()); + if (beforePart == null) { + tempUpserts.put(partIter.getKey(), partIter.getValue()); + } else if (partIter.getValue().equals(beforePart) == false) { + tempUpserts.put(partIter.getKey(), partIter.getValue()); + } + } + deletes = tempDeletes; + upserts = tempUpserts; + } + + private DiffableStringMapDiff(StreamInput in) throws IOException { + deletes = new ArrayList<>(); + upserts = new HashMap<>(); + int deletesCount = in.readVInt(); + for (int i = 0; i < deletesCount; i++) { + deletes.add(in.readString()); + } + int upsertsCount = in.readVInt(); + for (int i = 0; i < upsertsCount; i++) { + String key = in.readString(); + String newValue = in.readString(); + upserts.put(key, newValue); + } + } + + public List getDeletes() { + return deletes; + } + + public Map> getDiffs() { + return Collections.emptyMap(); + } + + public Map getUpserts() { + return upserts; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(deletes.size()); + for (String delete : deletes) { + out.writeString(delete); + } + out.writeVInt(upserts.size()); + for (Map.Entry entry : upserts.entrySet()) { + out.writeString(entry.getKey()); + out.writeString(entry.getValue()); + } + } + + @Override + public DiffableStringMap apply(DiffableStringMap part) { + Map builder = new HashMap<>(part.innerMap); + List deletes = getDeletes(); + for (String delete : deletes) { + builder.remove(delete); + } + assert getDiffs().size() == 0 : "there should never be diffs for DiffableStringMap"; + + for (Map.Entry upsert : upserts.entrySet()) { + builder.put(upsert.getKey(), upsert.getValue()); + } + return new DiffableStringMap(builder); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 28b391123738a..1241d95d5e6d9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -64,7 +64,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.EnumSet; -import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.Locale; @@ -80,59 +79,6 @@ public class IndexMetaData implements Diffable, ToXContentFragment { - /** - * This class will be removed in v7.0 - */ - @Deprecated - public interface Custom extends Diffable, ToXContent { - - String type(); - - Custom fromMap(Map map) throws IOException; - - Custom fromXContent(XContentParser parser) throws IOException; - - /** - * Reads the {@link org.elasticsearch.cluster.Diff} from StreamInput - */ - Diff readDiffFrom(StreamInput in) throws IOException; - - /** - * Reads an object of this type from the provided {@linkplain StreamInput}. The receiving instance remains unchanged. 
- */ - Custom readFrom(StreamInput in) throws IOException; - - /** - * Merges from this to another, with this being more important, i.e., if something exists in this and another, - * this will prevail. - */ - Custom mergeWith(Custom another); - } - - public static Map customPrototypes = new HashMap<>(); - - /** - * Register a custom index meta data factory. Make sure to call it from a static block. - */ - public static void registerPrototype(String type, Custom proto) { - customPrototypes.put(type, proto); - } - - @Nullable - public static T lookupPrototype(String type) { - //noinspection unchecked - return (T) customPrototypes.get(type); - } - - public static T lookupPrototypeSafe(String type) { - //noinspection unchecked - T proto = (T) customPrototypes.get(type); - if (proto == null) { - throw new IllegalArgumentException("No custom metadata prototype registered for type [" + type + "]"); - } - return proto; - } - public static final ClusterBlock INDEX_READ_ONLY_BLOCK = new ClusterBlock(5, "index read-only (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); public static final ClusterBlock INDEX_READ_BLOCK = new ClusterBlock(7, "index read (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.READ)); public static final ClusterBlock INDEX_WRITE_BLOCK = new ClusterBlock(8, "index write (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE)); @@ -324,7 +270,7 @@ public Iterator> settings() { private final ImmutableOpenMap mappings; - private final ImmutableOpenMap customs; + private final ImmutableOpenMap customData; private final ImmutableOpenIntMap> inSyncAllocationIds; @@ -343,7 +289,7 @@ public Iterator> settings() { private IndexMetaData(Index index, long version, long mappingVersion, long[] primaryTerms, State state, int numberOfShards, int numberOfReplicas, Settings settings, ImmutableOpenMap mappings, ImmutableOpenMap aliases, - ImmutableOpenMap customs, ImmutableOpenIntMap> inSyncAllocationIds, + ImmutableOpenMap customData, ImmutableOpenIntMap> inSyncAllocationIds, DiscoveryNodeFilters requireFilters, DiscoveryNodeFilters initialRecoveryFilters, DiscoveryNodeFilters includeFilters, DiscoveryNodeFilters excludeFilters, Version indexCreatedVersion, Version indexUpgradedVersion, int routingNumShards, int routingPartitionSize, ActiveShardCount waitForActiveShards, ImmutableOpenMap rolloverInfos) { @@ -360,7 +306,7 @@ private IndexMetaData(Index index, long version, long mappingVersion, long[] pri this.totalNumberOfShards = numberOfShards * (numberOfReplicas + 1); this.settings = settings; this.mappings = mappings; - this.customs = customs; + this.customData = customData; this.aliases = aliases; this.inSyncAllocationIds = inSyncAllocationIds; this.requireFilters = requireFilters; @@ -519,13 +465,12 @@ public MappingMetaData mappingOrDefault(String mappingType) { return mappings.get(MapperService.DEFAULT_MAPPING); } - public ImmutableOpenMap getCustoms() { - return this.customs; + ImmutableOpenMap getCustomData() { + return this.customData; } - @SuppressWarnings("unchecked") - public T custom(String type) { - return (T) customs.get(type); + public Map getCustomData(final String key) { + return Collections.unmodifiableMap(this.customData.get(key)); } public ImmutableOpenIntMap> getInSyncAllocationIds() { @@ -591,7 +536,7 @@ public boolean equals(Object o) { if (state != that.state) { return false; } - if (!customs.equals(that.customs)) { + if 
(!customData.equals(that.customData)) { return false; } if (routingNumShards != that.routingNumShards) { @@ -620,7 +565,7 @@ public int hashCode() { result = 31 * result + aliases.hashCode(); result = 31 * result + settings.hashCode(); result = 31 * result + mappings.hashCode(); - result = 31 * result + customs.hashCode(); + result = 31 * result + customData.hashCode(); result = 31 * result + Long.hashCode(routingFactor); result = 31 * result + Long.hashCode(routingNumShards); result = 31 * result + Arrays.hashCode(primaryTerms); @@ -660,7 +605,7 @@ private static class IndexMetaDataDiff implements Diff { private final Settings settings; private final Diff> mappings; private final Diff> aliases; - private final Diff> customs; + private final Diff> customData; private final Diff>> inSyncAllocationIds; private final Diff> rolloverInfos; @@ -674,7 +619,7 @@ private static class IndexMetaDataDiff implements Diff { primaryTerms = after.primaryTerms; mappings = DiffableUtils.diff(before.mappings, after.mappings, DiffableUtils.getStringKeySerializer()); aliases = DiffableUtils.diff(before.aliases, after.aliases, DiffableUtils.getStringKeySerializer()); - customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer()); + customData = DiffableUtils.diff(before.customData, after.customData, DiffableUtils.getStringKeySerializer()); inSyncAllocationIds = DiffableUtils.diff(before.inSyncAllocationIds, after.inSyncAllocationIds, DiffableUtils.getVIntKeySerializer(), DiffableUtils.StringSetValueSerializer.getInstance()); rolloverInfos = DiffableUtils.diff(before.rolloverInfos, after.rolloverInfos, DiffableUtils.getStringKeySerializer()); @@ -696,19 +641,8 @@ private static class IndexMetaDataDiff implements Diff { MappingMetaData::readDiffFrom); aliases = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), AliasMetaData::new, AliasMetaData::readDiffFrom); - customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), - new DiffableUtils.DiffableValueSerializer() { - @Override - public Custom read(StreamInput in, String key) throws IOException { - return lookupPrototypeSafe(key).readFrom(in); - } - - @SuppressWarnings("unchecked") - @Override - public Diff readDiff(StreamInput in, String key) throws IOException { - return lookupPrototypeSafe(key).readDiffFrom(in); - } - }); + customData = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), DiffableStringMap::new, + DiffableStringMap::readDiffFrom); inSyncAllocationIds = DiffableUtils.readImmutableOpenIntMapDiff(in, DiffableUtils.getVIntKeySerializer(), DiffableUtils.StringSetValueSerializer.getInstance()); if (in.getVersion().onOrAfter(Version.V_6_4_0)) { @@ -733,7 +667,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLongArray(primaryTerms); mappings.writeTo(out); aliases.writeTo(out); - customs.writeTo(out); + customData.writeTo(out); inSyncAllocationIds.writeTo(out); if (out.getVersion().onOrAfter(Version.V_6_4_0)) { rolloverInfos.writeTo(out); @@ -751,7 +685,7 @@ public IndexMetaData apply(IndexMetaData part) { builder.primaryTerms(primaryTerms); builder.mappings.putAll(mappings.apply(part.mappings)); builder.aliases.putAll(aliases.apply(part.aliases)); - builder.customs.putAll(customs.apply(part.customs)); + builder.customMetaData.putAll(customData.apply(part.customData)); builder.inSyncAllocationIds.putAll(inSyncAllocationIds.apply(part.inSyncAllocationIds)); 
builder.rolloverInfos.putAll(rolloverInfos.apply(part.rolloverInfos)); return builder.build(); @@ -781,10 +715,17 @@ public static IndexMetaData readFrom(StreamInput in) throws IOException { builder.putAlias(aliasMd); } int customSize = in.readVInt(); - for (int i = 0; i < customSize; i++) { - String type = in.readString(); - Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in); - builder.putCustom(type, customIndexMetaData); + if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + for (int i = 0; i < customSize; i++) { + String key = in.readString(); + DiffableStringMap custom = new DiffableStringMap(in); + builder.putCustom(key, custom); + } + } else { + assert customSize == 0 : "expected no custom index metadata"; + if (customSize > 0) { + throw new IllegalStateException("unexpected custom metadata when none is supported"); + } } int inSyncAllocationIdsSize = in.readVInt(); for (int i = 0; i < inSyncAllocationIdsSize; i++) { @@ -820,10 +761,14 @@ public void writeTo(StreamOutput out) throws IOException { for (ObjectCursor cursor : aliases.values()) { cursor.value.writeTo(out); } - out.writeVInt(customs.size()); - for (ObjectObjectCursor cursor : customs) { - out.writeString(cursor.key); - cursor.value.writeTo(out); + if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + out.writeVInt(customData.size()); + for (final ObjectObjectCursor cursor : customData) { + out.writeString(cursor.key); + cursor.value.writeTo(out); + } + } else { + out.writeVInt(0); } out.writeVInt(inSyncAllocationIds.size()); for (IntObjectCursor> cursor : inSyncAllocationIds) { @@ -856,7 +801,7 @@ public static class Builder { private Settings settings = Settings.Builder.EMPTY_SETTINGS; private final ImmutableOpenMap.Builder mappings; private final ImmutableOpenMap.Builder aliases; - private final ImmutableOpenMap.Builder customs; + private final ImmutableOpenMap.Builder customMetaData; private final ImmutableOpenIntMap.Builder> inSyncAllocationIds; private final ImmutableOpenMap.Builder rolloverInfos; private Integer routingNumShards; @@ -865,7 +810,7 @@ public Builder(String index) { this.index = index; this.mappings = ImmutableOpenMap.builder(); this.aliases = ImmutableOpenMap.builder(); - this.customs = ImmutableOpenMap.builder(); + this.customMetaData = ImmutableOpenMap.builder(); this.inSyncAllocationIds = ImmutableOpenIntMap.builder(); this.rolloverInfos = ImmutableOpenMap.builder(); } @@ -879,7 +824,7 @@ public Builder(IndexMetaData indexMetaData) { this.primaryTerms = indexMetaData.primaryTerms.clone(); this.mappings = ImmutableOpenMap.builder(indexMetaData.mappings); this.aliases = ImmutableOpenMap.builder(indexMetaData.aliases); - this.customs = ImmutableOpenMap.builder(indexMetaData.customs); + this.customMetaData = ImmutableOpenMap.builder(indexMetaData.customData); this.routingNumShards = indexMetaData.routingNumShards; this.inSyncAllocationIds = ImmutableOpenIntMap.builder(indexMetaData.inSyncAllocationIds); this.rolloverInfos = ImmutableOpenMap.builder(indexMetaData.rolloverInfos); @@ -1009,8 +954,8 @@ public Builder removeAllAliases() { return this; } - public Builder putCustom(String type, Custom customIndexMetaData) { - this.customs.put(type, customIndexMetaData); + public Builder putCustom(String type, Map customIndexMetaData) { + this.customMetaData.put(type, new DiffableStringMap(customIndexMetaData)); return this; } @@ -1178,7 +1123,7 @@ public IndexMetaData build() { final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE); return new IndexMetaData(new 
Index(index, uuid), version, mappingVersion, primaryTerms, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(), - tmpAliases.build(), customs.build(), filledInSyncAllocationIds.build(), requireFilters, initialRecoveryFilters, includeFilters, excludeFilters, + tmpAliases.build(), customMetaData.build(), filledInSyncAllocationIds.build(), requireFilters, initialRecoveryFilters, includeFilters, excludeFilters, indexCreatedVersion, indexUpgradedVersion, getRoutingNumShards(), routingPartitionSize, waitForActiveShards, rolloverInfos.build()); } @@ -1206,10 +1151,9 @@ public static void toXContent(IndexMetaData indexMetaData, XContentBuilder build } builder.endArray(); - for (ObjectObjectCursor cursor : indexMetaData.getCustoms()) { - builder.startObject(cursor.key); - cursor.value.toXContent(builder, params); - builder.endObject(); + for (ObjectObjectCursor cursor : indexMetaData.customData) { + builder.field(cursor.key); + builder.map(cursor.value); } builder.startObject(KEY_ALIASES); @@ -1318,15 +1262,8 @@ public static IndexMetaData fromXContent(XContentParser parser) throws IOExcepti assert Version.CURRENT.major <= 5; parser.skipChildren(); } else { - // check if its a custom index metadata - Custom proto = lookupPrototype(currentFieldName); - if (proto == null) { - //TODO warn - parser.skipChildren(); - } else { - Custom custom = proto.fromXContent(parser); - builder.putCustom(custom.type(), custom); - } + // assume it's custom index metadata + builder.putCustom(currentFieldName, parser.mapStrings()); } } else if (token == XContentParser.Token.START_ARRAY) { if (KEY_MAPPINGS.equals(currentFieldName)) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java index 94d0a62ad658b..830a4c23b3ae4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java @@ -20,7 +20,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.Diff; @@ -87,13 +87,10 @@ public class IndexTemplateMetaData extends AbstractDiffable aliases; - private final ImmutableOpenMap customs; - public IndexTemplateMetaData(String name, int order, Integer version, List patterns, Settings settings, ImmutableOpenMap mappings, - ImmutableOpenMap aliases, - ImmutableOpenMap customs) { + ImmutableOpenMap aliases) { if (patterns == null || patterns.isEmpty()) { throw new IllegalArgumentException("Index patterns must not be null or empty; got " + patterns); } @@ -104,7 +101,6 @@ public IndexTemplateMetaData(String name, int order, Integer version, this.settings = settings; this.mappings = mappings; this.aliases = aliases; - this.customs = customs; } public String name() { @@ -165,19 +161,6 @@ public ImmutableOpenMap getAliases() { return this.aliases; } - public ImmutableOpenMap customs() { - return this.customs; - } - - public ImmutableOpenMap getCustoms() { - return this.customs; - } - - @SuppressWarnings("unchecked") - public T custom(String type) { - return (T) customs.get(type); - } - public static Builder builder(String name) { return new Builder(name); } @@ -227,11 +210,13 @@ public static IndexTemplateMetaData readFrom(StreamInput in) 
throws IOException AliasMetaData aliasMd = new AliasMetaData(in); builder.putAlias(aliasMd); } - int customSize = in.readVInt(); - for (int i = 0; i < customSize; i++) { - String type = in.readString(); - IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupPrototypeSafe(type).readFrom(in); - builder.putCustom(type, customIndexMetaData); + if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + // Previously we allowed custom metadata + int customSize = in.readVInt(); + assert customSize == 0 : "expected no custom metadata"; + if (customSize > 0) { + throw new IllegalStateException("unexpected custom metadata when none is supported"); + } } if (in.getVersion().onOrAfter(Version.V_5_0_0_beta1)) { builder.version(in.readOptionalVInt()); @@ -262,10 +247,8 @@ public void writeTo(StreamOutput out) throws IOException { for (ObjectCursor cursor : aliases.values()) { cursor.value.writeTo(out); } - out.writeVInt(customs.size()); - for (ObjectObjectCursor cursor : customs) { - out.writeString(cursor.key); - cursor.value.writeTo(out); + if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + out.writeVInt(0); } if (out.getVersion().onOrAfter(Version.V_5_0_0_beta1)) { out.writeOptionalVInt(version); @@ -276,9 +259,6 @@ public static class Builder { private static final Set VALID_FIELDS = Sets.newHashSet( "template", "order", "mappings", "settings", "index_patterns", "aliases", "version"); - static { - VALID_FIELDS.addAll(IndexMetaData.customPrototypes.keySet()); - } private String name; @@ -294,13 +274,10 @@ public static class Builder { private final ImmutableOpenMap.Builder aliases; - private final ImmutableOpenMap.Builder customs; - public Builder(String name) { this.name = name; mappings = ImmutableOpenMap.builder(); aliases = ImmutableOpenMap.builder(); - customs = ImmutableOpenMap.builder(); } public Builder(IndexTemplateMetaData indexTemplateMetaData) { @@ -312,7 +289,6 @@ public Builder(IndexTemplateMetaData indexTemplateMetaData) { mappings = ImmutableOpenMap.builder(indexTemplateMetaData.mappings()); aliases = ImmutableOpenMap.builder(indexTemplateMetaData.aliases()); - customs = ImmutableOpenMap.builder(indexTemplateMetaData.customs()); } public Builder order(int order) { @@ -366,23 +342,8 @@ public Builder putAlias(AliasMetaData.Builder aliasMetaData) { return this; } - public Builder putCustom(String type, IndexMetaData.Custom customIndexMetaData) { - this.customs.put(type, customIndexMetaData); - return this; - } - - public Builder removeCustom(String type) { - this.customs.remove(type); - return this; - } - - public IndexMetaData.Custom getCustom(String type) { - return this.customs.get(type); - } - public IndexTemplateMetaData build() { - return new IndexTemplateMetaData(name, order, version, indexPatterns, settings, mappings.build(), - aliases.build(), customs.build()); + return new IndexTemplateMetaData(name, order, version, indexPatterns, settings, mappings.build(), aliases.build()); } @SuppressWarnings("unchecked") @@ -430,12 +391,6 @@ public static void toInnerXContent(IndexTemplateMetaData indexTemplateMetaData, builder.endArray(); } - for (ObjectObjectCursor cursor : indexTemplateMetaData.customs()) { - builder.startObject(cursor.key); - cursor.value.toXContent(builder, params); - builder.endObject(); - } - builder.startObject("aliases"); for (ObjectCursor cursor : indexTemplateMetaData.aliases().values()) { AliasMetaData.Builder.toXContent(cursor.value, builder, params); @@ -473,15 +428,7 @@ public static IndexTemplateMetaData fromXContent(XContentParser parser, 
String t builder.putAlias(AliasMetaData.Builder.fromXContent(parser)); } } else { - // check if its a custom index metadata - IndexMetaData.Custom proto = IndexMetaData.lookupPrototype(currentFieldName); - if (proto == null) { - //TODO warn - parser.skipChildren(); - } else { - IndexMetaData.Custom custom = proto.fromXContent(parser); - builder.putCustom(custom.type(), custom); - } + throw new ElasticsearchParseException("unknown key [{}] for index template", currentFieldName); } } else if (token == XContentParser.Token.START_ARRAY) { if ("mappings".equals(currentFieldName)) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 4052d4b2ce883..02da2af3636f9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -38,7 +38,6 @@ import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.block.ClusterBlocks; -import org.elasticsearch.cluster.metadata.IndexMetaData.Custom; import org.elasticsearch.cluster.metadata.IndexMetaData.State; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.IndexRoutingTable; @@ -291,7 +290,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { List templates = MetaDataIndexTemplateService.findTemplates(currentState.metaData(), request.index()); - Map customs = new HashMap<>(); + Map> customs = new HashMap<>(); // add the request mapping Map> mappings = new HashMap<>(); @@ -304,10 +303,6 @@ public ClusterState execute(ClusterState currentState) throws Exception { mappings.put(entry.getKey(), MapperService.parseMapping(xContentRegistry, entry.getValue())); } - for (Map.Entry entry : request.customs().entrySet()) { - customs.put(entry.getKey(), entry.getValue()); - } - final Index recoverFromIndex = request.recoverFrom(); if (recoverFromIndex == null) { @@ -324,18 +319,6 @@ public ClusterState execute(ClusterState currentState) throws Exception { MapperService.parseMapping(xContentRegistry, mappingString)); } } - // handle custom - for (ObjectObjectCursor cursor : template.customs()) { - String type = cursor.key; - IndexMetaData.Custom custom = cursor.value; - IndexMetaData.Custom existing = customs.get(type); - if (existing == null) { - customs.put(type, custom); - } else { - IndexMetaData.Custom merged = existing.mergeWith(custom); - customs.put(type, merged); - } - } //handle aliases for (ObjectObjectCursor cursor : template.aliases()) { AliasMetaData aliasMetaData = cursor.value; @@ -516,7 +499,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { indexMetaDataBuilder.putAlias(aliasMetaData); } - for (Map.Entry customEntry : customs.entrySet()) { + for (Map.Entry> customEntry : customs.entrySet()) { indexMetaDataBuilder.putCustom(customEntry.getKey(), customEntry.getValue()); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java index b046e2e9cc6b8..3f7b9f5e51e27 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java @@ -179,9 
+179,6 @@ public ClusterState execute(ClusterState currentState) throws Exception { .indexRouting(alias.indexRouting()).searchRouting(alias.searchRouting()).build(); templateBuilder.putAlias(aliasMetaData); } - for (Map.Entry entry : request.customs.entrySet()) { - templateBuilder.putCustom(entry.getKey(), entry.getValue()); - } IndexTemplateMetaData template = templateBuilder.build(); MetaData.Builder builder = MetaData.builder(currentState.metaData()).put(template); @@ -339,7 +336,6 @@ public static class PutRequest { Settings settings = Settings.Builder.EMPTY_SETTINGS; Map mappings = new HashMap<>(); List aliases = new ArrayList<>(); - Map customs = new HashMap<>(); TimeValue masterTimeout = MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT; @@ -378,11 +374,6 @@ public PutRequest aliases(Set aliases) { return this; } - public PutRequest customs(Map customs) { - this.customs.putAll(customs); - return this; - } - public PutRequest putMapping(String mappingType, String mappingSource) { mappings.put(mappingType, mappingSource); return this; diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DiffableStringMapTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DiffableStringMapTests.java new file mode 100644 index 0000000000000..341022030b374 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DiffableStringMapTests.java @@ -0,0 +1,103 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + +public class DiffableStringMapTests extends ESTestCase { + + public void testDiffableStringMapDiff() { + Map m = new HashMap<>(); + m.put("foo", "bar"); + m.put("baz", "eggplant"); + m.put("potato", "canon"); + DiffableStringMap dsm = new DiffableStringMap(m); + + Map m2 = new HashMap<>(); + m2.put("foo", "not-bar"); + m2.put("newkey", "yay"); + m2.put("baz", "eggplant"); + DiffableStringMap dsm2 = new DiffableStringMap(m2); + + Diff diff = dsm2.diff(dsm); + assertThat(diff, instanceOf(DiffableStringMap.DiffableStringMapDiff.class)); + DiffableStringMap.DiffableStringMapDiff dsmd = (DiffableStringMap.DiffableStringMapDiff) diff; + + assertThat(dsmd.getDeletes(), containsInAnyOrder("potato")); + assertThat(dsmd.getDiffs().size(), equalTo(0)); + Map upserts = new HashMap<>(); + upserts.put("foo", "not-bar"); + upserts.put("newkey", "yay"); + assertThat(dsmd.getUpserts(), equalTo(upserts)); + + DiffableStringMap dsm3 = diff.apply(dsm); + assertThat(dsm3.get("foo"), equalTo("not-bar")); + assertThat(dsm3.get("newkey"), equalTo("yay")); + assertThat(dsm3.get("baz"), equalTo("eggplant")); + assertThat(dsm3.get("potato"), equalTo(null)); + } + + public void testRandomDiffing() { + Map m = new HashMap<>(); + m.put("1", "1"); + m.put("2", "2"); + m.put("3", "3"); + DiffableStringMap dsm = new DiffableStringMap(m); + DiffableStringMap expected = new DiffableStringMap(m); + + for (int i = 0; i < randomIntBetween(5, 50); i++) { + if (randomBoolean() && expected.size() > 1) { + expected.remove(randomFrom(expected.keySet())); + } else if (randomBoolean()) { + expected.put(randomFrom(expected.keySet()), randomAlphaOfLength(4)); + } else { + expected.put(randomAlphaOfLength(2), randomAlphaOfLength(4)); + } + dsm = expected.diff(dsm).apply(dsm); + } + assertThat(expected, equalTo(dsm)); + } + + public void testSerialization() throws IOException { + Map m = new HashMap<>(); + // Occasionally have an empty map + if (frequently()) { + m.put("foo", "bar"); + m.put("baz", "eggplant"); + m.put("potato", "canon"); + } + DiffableStringMap dsm = new DiffableStringMap(m); + + BytesStreamOutput bso = new BytesStreamOutput(); + dsm.writeTo(bso); + DiffableStringMap deserialized = new DiffableStringMap(bso.bytes().streamInput()); + assertThat(deserialized, equalTo(dsm)); + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java index 6220027ee4133..8a6d46b102989 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java @@ -57,11 +57,11 @@ import org.mockito.ArgumentCaptor; import java.io.IOException; -import java.util.Map; +import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; +import java.util.Map; import java.util.Set; -import java.util.Collections; -import java.util.Arrays; import java.util.function.Supplier; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; @@ 
-73,13 +73,13 @@ import static org.hamcrest.Matchers.startsWith; import static org.mockito.Matchers.anyBoolean; import static org.mockito.Matchers.anyObject; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.verify; import static org.mockito.Mockito.anyMap; -import static org.mockito.Mockito.times; +import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class IndexCreationTaskTests extends ESTestCase { @@ -133,7 +133,6 @@ public void testApplyDataFromTemplate() throws Exception { addMatchingTemplate(builder -> builder .putAlias(AliasMetaData.builder("alias1")) .putMapping("mapping1", createMapping()) - .putCustom("custom1", createCustom()) .settings(Settings.builder().put("key1", "value1")) ); @@ -144,7 +143,6 @@ public void testApplyDataFromTemplate() throws Exception { + "you must manage this on the create index request or with an index template"); assertThat(result.metaData().index("test").getAliases(), hasKey("alias1")); - assertThat(result.metaData().index("test").getCustoms(), hasKey("custom1")); assertThat(result.metaData().index("test").getSettings().get("key1"), equalTo("value1")); assertThat(getMappingsFromResponse(), Matchers.hasKey("mapping1")); } @@ -152,7 +150,6 @@ public void testApplyDataFromTemplate() throws Exception { public void testApplyDataFromRequest() throws Exception { setupRequestAlias(new Alias("alias1")); setupRequestMapping("mapping1", createMapping()); - setupRequestCustom("custom1", createCustom()); reqSettings.put("key1", "value1"); final ClusterState result = executeTask(); @@ -162,30 +159,22 @@ public void testApplyDataFromRequest() throws Exception { + "you must manage this on the create index request or with an index template"); assertThat(result.metaData().index("test").getAliases(), hasKey("alias1")); - assertThat(result.metaData().index("test").getCustoms(), hasKey("custom1")); assertThat(result.metaData().index("test").getSettings().get("key1"), equalTo("value1")); assertThat(getMappingsFromResponse(), Matchers.hasKey("mapping1")); } public void testRequestDataHavePriorityOverTemplateData() throws Exception { - final IndexMetaData.Custom tplCustom = createCustom(); - final IndexMetaData.Custom reqCustom = createCustom(); - final IndexMetaData.Custom mergedCustom = createCustom(); - when(reqCustom.mergeWith(tplCustom)).thenReturn(mergedCustom); - final CompressedXContent tplMapping = createMapping("text"); final CompressedXContent reqMapping = createMapping("keyword"); addMatchingTemplate(builder -> builder .putAlias(AliasMetaData.builder("alias1").searchRouting("fromTpl").build()) .putMapping("mapping1", tplMapping) - .putCustom("custom1", tplCustom) .settings(Settings.builder().put("key1", "tplValue")) ); setupRequestAlias(new Alias("alias1").searchRouting("fromReq")); setupRequestMapping("mapping1", reqMapping); - setupRequestCustom("custom1", reqCustom); reqSettings.put("key1", "reqValue"); final ClusterState result = executeTask(); @@ -193,8 +182,6 @@ public void testRequestDataHavePriorityOverTemplateData() throws Exception { assertWarnings("the default number of shards will change from [5] to [1] in 7.0.0; " + "if you wish to continue using the default of [5] shards, " + "you must manage this on the create index request or with an index 
template"); - - assertThat(result.metaData().index("test").getCustoms().get("custom1"), equalTo(mergedCustom)); assertThat(result.metaData().index("test").getAliases().get("alias1").getSearchRouting(), equalTo("fromReq")); assertThat(result.metaData().index("test").getSettings().get("key1"), equalTo("reqValue")); assertThat(getMappingsFromResponse().get("mapping1").toString(), equalTo("{type={properties={field={type=keyword}}}}")); @@ -306,14 +293,13 @@ public void testShrinkIndexIgnoresTemplates() throws Exception { addMatchingTemplate(builder -> builder .putAlias(AliasMetaData.builder("alias1").searchRouting("fromTpl").build()) .putMapping("mapping1", createMapping()) - .putCustom("custom1", createCustom()) .settings(Settings.builder().put("key1", "tplValue")) ); final ClusterState result = executeTask(); assertThat(result.metaData().index("test").getAliases(), not(hasKey("alias1"))); - assertThat(result.metaData().index("test").getCustoms(), not(hasKey("custom1"))); + assertThat(result.metaData().index("test").getCustomData(), not(hasKey("custom1"))); assertThat(result.metaData().index("test").getSettings().keySet(), not(Matchers.contains("key1"))); assertThat(getMappingsFromResponse(), not(Matchers.hasKey("mapping1"))); } @@ -334,7 +320,6 @@ public void testWriteIndex() throws Exception { Boolean writeIndex = randomBoolean() ? null : randomBoolean(); setupRequestAlias(new Alias("alias1").writeIndex(writeIndex)); setupRequestMapping("mapping1", createMapping()); - setupRequestCustom("custom1", createCustom()); reqSettings.put("key1", "value1"); final ClusterState result = executeTask(); @@ -352,7 +337,6 @@ public void testWriteIndexValidationException() throws Exception { .numberOfShards(1).numberOfReplicas(0).build(); idxBuilder.put("test2", existingWriteIndex); setupRequestMapping("mapping1", createMapping()); - setupRequestCustom("custom1", createCustom()); reqSettings.put("key1", "value1"); setupRequestAlias(new Alias("alias1").writeIndex(true)); @@ -388,8 +372,8 @@ private IndexMetaData.Builder createIndexMetaDataBuilder(String name, String uui .numberOfReplicas(numReplicas); } - private IndexMetaData.Custom createCustom() { - return mock(IndexMetaData.Custom.class); + private Map createCustom() { + return Collections.singletonMap("a", "b"); } private interface MetaDataBuilderConfigurator { @@ -418,10 +402,6 @@ private void setupRequestMapping(String mappingKey, CompressedXContent mapping) when(request.mappings()).thenReturn(Collections.singletonMap(mappingKey, mapping.string())); } - private void setupRequestCustom(String customKey, IndexMetaData.Custom custom) throws IOException { - when(request.customs()).thenReturn(Collections.singletonMap(customKey, custom)); - } - private CompressedXContent createMapping() throws IOException { return createMapping("text"); } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java index 9e8a5e04f43c1..393f7f6b1d4aa 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java @@ -23,7 +23,9 @@ import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition; import org.elasticsearch.action.admin.indices.rollover.MaxSizeCondition; import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; +import 
org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -45,6 +47,8 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; +import java.util.Map; import java.util.Set; import static org.hamcrest.Matchers.is; @@ -71,6 +75,9 @@ protected NamedXContentRegistry xContentRegistry() { public void testIndexMetaDataSerialization() throws IOException { Integer numShard = randomFrom(1, 2, 4, 8, 16); int numberOfReplicas = randomIntBetween(0, 10); + Map customMap = new HashMap<>(); + customMap.put(randomAlphaOfLength(5), randomAlphaOfLength(10)); + customMap.put(randomAlphaOfLength(10), randomAlphaOfLength(15)); IndexMetaData metaData = IndexMetaData.builder("foo") .settings(Settings.builder() .put("index.version.created", 1) @@ -80,6 +87,7 @@ public void testIndexMetaDataSerialization() throws IOException { .creationDate(randomLong()) .primaryTerm(0, 2) .setRoutingNumShards(32) + .putCustom("my_custom", customMap) .putRolloverInfo( new RolloverInfo(randomAlphaOfLength(5), Arrays.asList(new MaxAgeCondition(TimeValue.timeValueMillis(randomNonNegativeLong())), @@ -93,7 +101,8 @@ public void testIndexMetaDataSerialization() throws IOException { builder.endObject(); XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); final IndexMetaData fromXContentMeta = IndexMetaData.fromXContent(parser); - assertEquals(metaData, fromXContentMeta); + assertEquals("expected: " + Strings.toString(metaData) + "\nactual : " + Strings.toString(fromXContentMeta), + metaData, fromXContentMeta); assertEquals(metaData.hashCode(), fromXContentMeta.hashCode()); assertEquals(metaData.getNumberOfReplicas(), fromXContentMeta.getNumberOfReplicas()); @@ -103,6 +112,11 @@ public void testIndexMetaDataSerialization() throws IOException { assertEquals(metaData.getCreationDate(), fromXContentMeta.getCreationDate()); assertEquals(metaData.getRoutingFactor(), fromXContentMeta.getRoutingFactor()); assertEquals(metaData.primaryTerm(0), fromXContentMeta.primaryTerm(0)); + ImmutableOpenMap.Builder expectedCustomBuilder = ImmutableOpenMap.builder(); + expectedCustomBuilder.put("my_custom", new DiffableStringMap(customMap)); + ImmutableOpenMap expectedCustom = expectedCustomBuilder.build(); + assertEquals(metaData.getCustomData(), expectedCustom); + assertEquals(metaData.getCustomData(), fromXContentMeta.getCustomData()); final BytesStreamOutput out = new BytesStreamOutput(); metaData.writeTo(out); @@ -119,6 +133,8 @@ public void testIndexMetaDataSerialization() throws IOException { assertEquals(metaData.getRoutingFactor(), deserialized.getRoutingFactor()); assertEquals(metaData.primaryTerm(0), deserialized.primaryTerm(0)); assertEquals(metaData.getRolloverInfos(), deserialized.getRolloverInfos()); + assertEquals(deserialized.getCustomData(), expectedCustom); + assertEquals(metaData.getCustomData(), deserialized.getCustomData()); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaDataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaDataTests.java index 57166e09d49c8..9a87208204dd2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaDataTests.java @@ 
-128,13 +128,13 @@ public void testIndexTemplateMetaDataXContentRoundTrip() throws Exception { public void testValidateInvalidIndexPatterns() throws Exception { final IllegalArgumentException emptyPatternError = expectThrows(IllegalArgumentException.class, () -> { new IndexTemplateMetaData(randomRealisticUnicodeOfLengthBetween(5, 10), randomInt(), randomInt(), - Collections.emptyList(), Settings.EMPTY, ImmutableOpenMap.of(), ImmutableOpenMap.of(), ImmutableOpenMap.of()); + Collections.emptyList(), Settings.EMPTY, ImmutableOpenMap.of(), ImmutableOpenMap.of()); }); assertThat(emptyPatternError.getMessage(), equalTo("Index patterns must not be null or empty; got []")); final IllegalArgumentException nullPatternError = expectThrows(IllegalArgumentException.class, () -> { new IndexTemplateMetaData(randomRealisticUnicodeOfLengthBetween(5, 10), randomInt(), randomInt(), - null, Settings.EMPTY, ImmutableOpenMap.of(), ImmutableOpenMap.of(), ImmutableOpenMap.of()); + null, Settings.EMPTY, ImmutableOpenMap.of(), ImmutableOpenMap.of()); }); assertThat(nullPatternError.getMessage(), equalTo("Index patterns must not be null or empty; got null")); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java index a2c70db3b63e8..4000969187548 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java @@ -259,7 +259,7 @@ private ClusterState getClusterStateWithSecurityIndex() { .put(IndexMetaData.builder(securityIndexName).settings(settings)) .put(new IndexTemplateMetaData(SecurityIndexManager.SECURITY_TEMPLATE_NAME, 0, 0, Collections.singletonList(securityIndexName), Settings.EMPTY, ImmutableOpenMap.of(), - ImmutableOpenMap.of(), ImmutableOpenMap.of())) + ImmutableOpenMap.of())) .build(); if (withAlias) { From 3262f4acc464297ea77d357f98e0a0724ec0ef57 Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Thu, 30 Aug 2018 15:05:38 -0600 Subject: [PATCH 26/52] Update serialization versions for custom IndexMetaData backport --- .../action/admin/indices/create/CreateIndexRequest.java | 4 ++-- .../admin/indices/template/put/PutIndexTemplateRequest.java | 4 ++-- .../org/elasticsearch/cluster/metadata/IndexMetaData.java | 4 ++-- .../elasticsearch/cluster/metadata/IndexTemplateMetaData.java | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index 8858d0ad7609b..45378d24dbdb6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -474,7 +474,7 @@ public void readFrom(StreamInput in) throws IOException { } mappings.put(type, source); } - if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + if (in.getVersion().before(Version.V_6_5_0)) { // This used to be the size of custom metadata classes int customSize = in.readVInt(); assert customSize == 0 : "unexpected custom metadata when none is supported"; @@ -501,7 +501,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(entry.getKey()); 
out.writeString(entry.getValue()); } - if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + if (out.getVersion().before(Version.V_6_5_0)) { // Size of custom index metadata, which is removed out.writeVInt(0); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java index 1026c6dca3ac6..575902a2f6349 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java @@ -480,7 +480,7 @@ public void readFrom(StreamInput in) throws IOException { } mappings.put(type, mappingSource); } - if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + if (in.getVersion().before(Version.V_6_5_0)) { // Used to be used for custom index metadata int customSize = in.readVInt(); assert customSize == 0 : "expected not to have any custom metadata"; @@ -513,7 +513,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(entry.getKey()); out.writeString(entry.getValue()); } - if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + if (out.getVersion().before(Version.V_6_5_0)) { out.writeVInt(0); } out.writeVInt(aliases.size()); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 1241d95d5e6d9..757edcbb50e60 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -715,7 +715,7 @@ public static IndexMetaData readFrom(StreamInput in) throws IOException { builder.putAlias(aliasMd); } int customSize = in.readVInt(); - if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (in.getVersion().onOrAfter(Version.V_6_5_0)) { for (int i = 0; i < customSize; i++) { String key = in.readString(); DiffableStringMap custom = new DiffableStringMap(in); @@ -761,7 +761,7 @@ public void writeTo(StreamOutput out) throws IOException { for (ObjectCursor cursor : aliases.values()) { cursor.value.writeTo(out); } - if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (out.getVersion().onOrAfter(Version.V_6_5_0)) { out.writeVInt(customData.size()); for (final ObjectObjectCursor cursor : customData) { out.writeString(cursor.key); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java index 830a4c23b3ae4..19f7f6d3c02ab 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java @@ -210,7 +210,7 @@ public static IndexTemplateMetaData readFrom(StreamInput in) throws IOException AliasMetaData aliasMd = new AliasMetaData(in); builder.putAlias(aliasMd); } - if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + if (in.getVersion().before(Version.V_6_5_0)) { // Previously we allowed custom metadata int customSize = in.readVInt(); assert customSize == 0 : "expected no custom metadata"; @@ -247,7 +247,7 @@ public void writeTo(StreamOutput out) throws IOException { for (ObjectCursor cursor : aliases.values()) { cursor.value.writeTo(out); } - if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + if 
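The wire-version checks adjusted above all follow the same backwards-compatibility pattern: only 6.5.0+ streams carry the new custom metadata entries, while older readers still expect the legacy custom count and are sent an empty block. A compressed illustration of the write side, not the literal patch code (hypothetical surrounding method; `customData` stands for the ImmutableOpenMap<String, DiffableStringMap> held by IndexMetaData and `out` for a StreamOutput):

    // Illustration only: version-gated write of custom index metadata.
    if (out.getVersion().onOrAfter(Version.V_6_5_0)) {
        out.writeVInt(customData.size());
        for (final ObjectObjectCursor<String, DiffableStringMap> cursor : customData) {
            out.writeString(cursor.key);
            cursor.value.writeTo(out);
        }
    } else {
        // Pre-6.5.0 readers always consume a count for the removed custom objects, so send zero.
        out.writeVInt(0);
    }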
(out.getVersion().before(Version.V_6_5_0)) { out.writeVInt(0); } if (out.getVersion().onOrAfter(Version.V_5_0_0_beta1)) { From 1dc0e93689bbf1bbbbb77155b45ec9f3876a39b7 Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Thu, 30 Aug 2018 15:25:35 -0600 Subject: [PATCH 27/52] Don't be strict for 6.x --- .../action/admin/indices/create/CreateIndexRequest.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index 45378d24dbdb6..2754cf1e2791c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -393,8 +393,6 @@ public CreateIndexRequest source(Map source, DeprecationHandler depre } else if (ALIASES.match(name, deprecationHandler)) { found = true; aliases((Map) entry.getValue()); - } else { - throw new ElasticsearchParseException("unknown key [{}] for create index", name); } } if (!found) { From e57cc4f63399515a499c2379b50ad51b05549ff2 Mon Sep 17 00:00:00 2001 From: Nick Peihl Date: Thu, 30 Aug 2018 17:00:10 -0700 Subject: [PATCH 28/52] Add region ISO code to GeoIP Ingest plugin (#31669) (#33276) --- docs/plugins/ingest-geoip.asciidoc | 4 ++-- .../ingest/geoip/GeoIpProcessor.java | 18 +++++++++++++++--- .../geoip/GeoIpProcessorFactoryTests.java | 2 +- .../ingest/geoip/GeoIpProcessorTests.java | 3 ++- .../test/ingest_geoip/20_geoip_processor.yml | 11 +++++++---- 5 files changed, 27 insertions(+), 11 deletions(-) diff --git a/docs/plugins/ingest-geoip.asciidoc b/docs/plugins/ingest-geoip.asciidoc index 32516d07bef37..688b36042ea59 100644 --- a/docs/plugins/ingest-geoip.asciidoc +++ b/docs/plugins/ingest-geoip.asciidoc @@ -26,14 +26,14 @@ include::install_remove.asciidoc[] | `field` | yes | - | The field to get the ip address from for the geographical lookup. | `target_field` | no | geoip | The field that will hold the geographical information looked up from the Maxmind database. | `database_file` | no | GeoLite2-City.mmdb | The database filename in the geoip config directory. The ingest-geoip plugin ships with the GeoLite2-City.mmdb, GeoLite2-Country.mmdb and GeoLite2-ASN.mmdb files. -| `properties` | no | [`continent_name`, `country_iso_code`, `region_name`, `city_name`, `location`] * | Controls what properties are added to the `target_field` based on the geoip lookup. +| `properties` | no | [`continent_name`, `country_iso_code`, `region_iso_code`, `region_name`, `city_name`, `location`] * | Controls what properties are added to the `target_field` based on the geoip lookup. | `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document |====== *Depends on what is available in `database_field`: * If the GeoLite2 City database is used, then the following fields may be added under the `target_field`: `ip`, -`country_iso_code`, `country_name`, `continent_name`, `region_name`, `city_name`, `timezone`, `latitude`, `longitude` +`country_iso_code`, `country_name`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `timezone`, `latitude`, `longitude` and `location`. The fields actually added depend on what has been found and which properties were configured in `properties`. 
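The `region_iso_code` value documented above for the City database is not a field MaxMind returns directly; the REGION_ISO_CODE case added to GeoIpProcessor later in this commit composes it from the country and subdivision ISO codes (ISO 3166-2). A standalone sketch of that composition, with hard-coded codes standing in for the database lookup results:

    // Sketch: how region_iso_code is derived, e.g. "US" + "-" + "MN" -> "US-MN".
    Map<String, Object> geoData = new HashMap<>();
    String countryIso = "US";       // country.getIsoCode() in the processor
    String subdivisionIso = "MN";   // subdivision.getIsoCode() in the processor
    if (countryIso != null && subdivisionIso != null) {
        geoData.put("region_iso_code", countryIso + "-" + subdivisionIso);
    }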
* If the GeoLite2 Country database is used, then the following fields may be added under the `target_field`: `ip`, `country_iso_code`, `country_name` and `continent_name`. The fields actually added depend on what has been found and which properties diff --git a/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java b/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java index f1b4b33017e3d..366b6ffc1d241 100644 --- a/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java +++ b/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java @@ -185,6 +185,16 @@ private Map retrieveCityGeoData(InetAddress ipAddress) { geoData.put("continent_name", continentName); } break; + case REGION_ISO_CODE: + // ISO 3166-2 code for country subdivisions. + // See iso.org/iso-3166-country-codes.html + String countryIso = country.getIsoCode(); + String subdivisionIso = subdivision.getIsoCode(); + if (countryIso != null && subdivisionIso != null) { + String regionIsoCode = countryIso + "-" + subdivisionIso; + geoData.put("region_iso_code", regionIsoCode); + } + break; case REGION_NAME: String subdivisionName = subdivision.getName(); if (subdivisionName != null) { @@ -300,8 +310,8 @@ private Map retrieveAsnGeoData(InetAddress ipAddress) { public static final class Factory implements Processor.Factory { static final Set DEFAULT_CITY_PROPERTIES = EnumSet.of( - Property.CONTINENT_NAME, Property.COUNTRY_ISO_CODE, Property.REGION_NAME, - Property.CITY_NAME, Property.LOCATION + Property.CONTINENT_NAME, Property.COUNTRY_ISO_CODE, Property.REGION_ISO_CODE, + Property.REGION_NAME, Property.CITY_NAME, Property.LOCATION ); static final Set DEFAULT_COUNTRY_PROPERTIES = EnumSet.of( Property.CONTINENT_NAME, Property.COUNTRY_ISO_CODE @@ -377,6 +387,7 @@ enum Property { COUNTRY_ISO_CODE, COUNTRY_NAME, CONTINENT_NAME, + REGION_ISO_CODE, REGION_NAME, CITY_NAME, TIMEZONE, @@ -386,7 +397,8 @@ enum Property { static final EnumSet ALL_CITY_PROPERTIES = EnumSet.of( Property.IP, Property.COUNTRY_ISO_CODE, Property.COUNTRY_NAME, Property.CONTINENT_NAME, - Property.REGION_NAME, Property.CITY_NAME, Property.TIMEZONE, Property.LOCATION + Property.REGION_ISO_CODE, Property.REGION_NAME, Property.CITY_NAME, Property.TIMEZONE, + Property.LOCATION ); static final EnumSet ALL_COUNTRY_PROPERTIES = EnumSet.of( Property.IP, Property.CONTINENT_NAME, Property.COUNTRY_NAME, Property.COUNTRY_ISO_CODE diff --git a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java index 0aa2eb9fdfa3b..7a5d6f5808f76 100644 --- a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java +++ b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java @@ -284,7 +284,7 @@ public void testBuildIllegalFieldOption() throws Exception { config1.put("properties", Collections.singletonList("invalid")); Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, null, config1)); assertThat(e.getMessage(), equalTo("[properties] illegal property value [invalid]. 
valid values are [IP, COUNTRY_ISO_CODE, " + - "COUNTRY_NAME, CONTINENT_NAME, REGION_NAME, CITY_NAME, TIMEZONE, LOCATION]")); + "COUNTRY_NAME, CONTINENT_NAME, REGION_ISO_CODE, REGION_NAME, CITY_NAME, TIMEZONE, LOCATION]")); Map config2 = new HashMap<>(); config2.put("field", "_field"); diff --git a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java index 48a1769cbf82f..4c04d4e340a71 100644 --- a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java +++ b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java @@ -117,11 +117,12 @@ public void testCity_withIpV6() throws Exception { assertThat(ingestDocument.getSourceAndMetadata().get("source_field"), equalTo(address)); @SuppressWarnings("unchecked") Map geoData = (Map) ingestDocument.getSourceAndMetadata().get("target_field"); - assertThat(geoData.size(), equalTo(8)); + assertThat(geoData.size(), equalTo(9)); assertThat(geoData.get("ip"), equalTo(address)); assertThat(geoData.get("country_iso_code"), equalTo("US")); assertThat(geoData.get("country_name"), equalTo("United States")); assertThat(geoData.get("continent_name"), equalTo("North America")); + assertThat(geoData.get("region_iso_code"), equalTo("US-FL")); assertThat(geoData.get("region_name"), equalTo("Florida")); assertThat(geoData.get("city_name"), equalTo("Hollywood")); assertThat(geoData.get("timezone"), equalTo("America/New_York")); diff --git a/plugins/ingest-geoip/src/test/resources/rest-api-spec/test/ingest_geoip/20_geoip_processor.yml b/plugins/ingest-geoip/src/test/resources/rest-api-spec/test/ingest_geoip/20_geoip_processor.yml index 0c400c3c0eabe..012ca7173187c 100644 --- a/plugins/ingest-geoip/src/test/resources/rest-api-spec/test/ingest_geoip/20_geoip_processor.yml +++ b/plugins/ingest-geoip/src/test/resources/rest-api-spec/test/ingest_geoip/20_geoip_processor.yml @@ -30,11 +30,12 @@ type: test id: 1 - match: { _source.field1: "128.101.101.101" } - - length: { _source.geoip: 5 } + - length: { _source.geoip: 6 } - match: { _source.geoip.city_name: "Minneapolis" } - match: { _source.geoip.country_iso_code: "US" } - match: { _source.geoip.location.lon: -93.2166 } - match: { _source.geoip.location.lat: 44.9759 } + - match: { _source.geoip.region_iso_code: "US-MN" } - match: { _source.geoip.region_name: "Minnesota" } - match: { _source.geoip.continent_name: "North America" } @@ -54,7 +55,7 @@ { "geoip" : { "field" : "field1", - "properties" : ["city_name", "country_iso_code", "ip", "location", "timezone", "country_name", "region_name", "continent_name"] + "properties" : ["city_name", "country_iso_code", "ip", "location", "timezone", "country_name", "region_iso_code", "region_name", "continent_name"] } } ] @@ -75,7 +76,7 @@ type: test id: 1 - match: { _source.field1: "128.101.101.101" } - - length: { _source.geoip: 8 } + - length: { _source.geoip: 9 } - match: { _source.geoip.city_name: "Minneapolis" } - match: { _source.geoip.country_iso_code: "US" } - match: { _source.geoip.ip: "128.101.101.101" } @@ -83,6 +84,7 @@ - match: { _source.geoip.location.lat: 44.9759 } - match: { _source.geoip.timezone: "America/Chicago" } - match: { _source.geoip.country_name: "United States" } + - match: { _source.geoip.region_iso_code: "US-MN" } - match: { _source.geoip.region_name: "Minnesota" } - match: { _source.geoip.continent_name: "North America" } @@ -188,11 +190,12 @@ type: test id: 2 
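The REST tests above request the new property by its lowercase config name alongside the existing ones. A hedged Java equivalent of that configuration, as it might appear in GeoIpProcessorFactoryTests (the `factory` instance and its database setup are elided, as in the surrounding tests):

    // Sketch: explicitly selecting region_iso_code in the processor config.
    Map<String, Object> config = new HashMap<>();
    config.put("field", "source_field");
    config.put("properties", Arrays.asList("country_iso_code", "region_iso_code", "region_name"));
    GeoIpProcessor processor = factory.create(null, null, config);
    // Unknown names are rejected with the "[properties] illegal property value ..." message asserted above.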
- match: { _source.field1: "128.101.101.101" } - - length: { _source.geoip: 5 } + - length: { _source.geoip: 6 } - match: { _source.geoip.city_name: "Minneapolis" } - match: { _source.geoip.country_iso_code: "US" } - match: { _source.geoip.location.lon: -93.2166 } - match: { _source.geoip.location.lat: 44.9759 } + - match: { _source.geoip.region_iso_code: "US-MN" } - match: { _source.geoip.region_name: "Minnesota" } - match: { _source.geoip.continent_name: "North America" } From 801baf346578ec4f67933c716d3ee2e6469d22f1 Mon Sep 17 00:00:00 2001 From: Tal Levy Date: Thu, 30 Aug 2018 18:13:50 -0700 Subject: [PATCH 29/52] [MUTE] SmokeTestWatcherWithSecurityIT flaky tests --- .../elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java index 665b92bbc0e3f..538d54416bf69 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java @@ -158,6 +158,7 @@ public void testSearchInputHasPermissions() throws Exception { assertThat(conditionMet, is(true)); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29893") public void testSearchInputWithInsufficientPrivileges() throws Exception { String indexName = "index_not_allowed_to_read"; try (XContentBuilder builder = jsonBuilder()) { @@ -213,6 +214,7 @@ public void testSearchTransformHasPermissions() throws Exception { assertThat(value, is("15")); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33291") public void testSearchTransformInsufficientPermissions() throws Exception { try (XContentBuilder builder = jsonBuilder()) { builder.startObject(); From dfba2164860753e847a02a137742f3172eead0f5 Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Fri, 31 Aug 2018 09:31:55 +0300 Subject: [PATCH 30/52] Enable forbiddenapis server java9 (#33245) --- .../groovy/org/elasticsearch/gradle/BuildPlugin.groovy | 3 ++- .../gradle/precommit/PrecommitTasks.groovy | 2 +- libs/core/build.gradle | 9 +++++---- server/build.gradle | 10 +++++----- 4 files changed, 13 insertions(+), 11 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 688efb843a8f1..6d36f606ad35a 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -38,7 +38,6 @@ import org.gradle.api.artifacts.ModuleDependency import org.gradle.api.artifacts.ModuleVersionIdentifier import org.gradle.api.artifacts.ProjectDependency import org.gradle.api.artifacts.ResolvedArtifact -import org.gradle.api.artifacts.SelfResolvingDependency import org.gradle.api.artifacts.dsl.RepositoryHandler import org.gradle.api.execution.TaskExecutionGraph import org.gradle.api.plugins.JavaPlugin @@ -212,6 +211,7 @@ class BuildPlugin implements Plugin { project.rootProject.ext.minimumRuntimeVersion = minimumRuntimeVersion project.rootProject.ext.inFipsJvm = inFipsJvm project.rootProject.ext.gradleJavaVersion = JavaVersion.toVersion(gradleJavaVersion) + 
project.rootProject.ext.java9Home = findJavaHome("9") } project.targetCompatibility = project.rootProject.ext.minimumRuntimeVersion @@ -225,6 +225,7 @@ class BuildPlugin implements Plugin { project.ext.javaVersions = project.rootProject.ext.javaVersions project.ext.inFipsJvm = project.rootProject.ext.inFipsJvm project.ext.gradleJavaVersion = project.rootProject.ext.gradleJavaVersion + project.ext.java9Home = project.rootProject.ext.java9Home } private static String getPaddedMajorVersion(JavaVersion compilerJavaVersionEnum) { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index be7561853bbb2..06557d4ccfdb7 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -100,7 +100,7 @@ class PrecommitTasks { private static Task configureForbiddenApisCli(Project project) { Task forbiddenApisCli = project.tasks.create('forbiddenApis') - project.sourceSets.forEach { sourceSet -> + project.sourceSets.all { sourceSet -> forbiddenApisCli.dependsOn( project.tasks.create(sourceSet.getTaskName('forbiddenApis', null), ForbiddenApisCliTask) { ExportElasticsearchBuildResourcesTask buildResources = project.tasks.getByName('buildResources') diff --git a/libs/core/build.gradle b/libs/core/build.gradle index cc5c1e20fc162..9c90837bd80ed 100644 --- a/libs/core/build.gradle +++ b/libs/core/build.gradle @@ -46,12 +46,13 @@ if (!isEclipse && !isIdea) { targetCompatibility = 9 } - /* Enable this when forbiddenapis was updated to 2.6. - * See: https://github.com/elastic/elasticsearch/issues/29292 forbiddenApisJava9 { - targetCompatibility = 9 + if (project.runtimeJavaVersion < JavaVersion.VERSION_1_9) { + targetCompatibility = JavaVersion.VERSION_1_9 + javaHome = project.java9Home + } + replaceSignatureFiles 'jdk-signatures' } - */ jar { metaInf { diff --git a/server/build.gradle b/server/build.gradle index 134b8aceebba5..8d066d4a799ed 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -58,13 +58,13 @@ if (!isEclipse && !isIdea) { sourceCompatibility = 9 targetCompatibility = 9 } - - /* Enable this when forbiddenapis was updated to 2.6. 
- * See: https://github.com/elastic/elasticsearch/issues/29292 + forbiddenApisJava9 { - targetCompatibility = 9 + if (project.runtimeJavaVersion < JavaVersion.VERSION_1_9) { + targetCompatibility = JavaVersion.VERSION_1_9 + javaHome = project.java9Home + } } - */ jar { metaInf { From 6e1354c9896d63d395e9ef94e44a1a702dca72ac Mon Sep 17 00:00:00 2001 From: Costin Leau Date: Fri, 31 Aug 2018 10:45:25 +0300 Subject: [PATCH 31/52] SQL: Support multi-index format as table identifier (#33278) Extend tableIdentifier to support multi-index format; not just * but also enumeration and exclusion Fix #33162 (cherry picked from commit 73eb4cbbbe1c38508f6fc303ca300c508952b507) --- .../sql/analysis/index/IndexResolver.java | 3 +- .../xpack/sql/execution/search/Querier.java | 3 +- .../xpack/sql/parser/IdentifierBuilder.java | 14 ------- .../sql/parser/IdentifierBuilderTests.java | 38 ------------------- .../sql/src/main/resources/command.csv-spec | 24 ++++++++++++ 5 files changed, 28 insertions(+), 54 deletions(-) delete mode 100644 x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilderTests.java diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java index 10586c991b1ac..b11542d40ed53 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java @@ -19,6 +19,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.xpack.sql.type.EsField; @@ -300,7 +301,7 @@ public void resolveAsSeparateMappings(String indexWildcard, String javaRegex, Ac private static GetIndexRequest createGetIndexRequest(String index) { return new GetIndexRequest() .local(true) - .indices(index) + .indices(Strings.commaDelimitedListToStringArray(index)) .features(Feature.MAPPINGS) //lenient because we throw our own errors looking at the response e.g. 
if something was not resolved //also because this way security doesn't throw authorization exceptions but rather honours ignore_unavailable diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java index 055e34758cc75..d0bff77a6485d 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.client.Client; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.CollectionUtils; @@ -92,7 +93,7 @@ public void query(Schema schema, QueryContainer query, String index, ActionListe log.trace("About to execute query {} on {}", StringUtils.toString(sourceBuilder), index); } - SearchRequest search = prepareRequest(client, sourceBuilder, timeout, index); + SearchRequest search = prepareRequest(client, sourceBuilder, timeout, Strings.commaDelimitedListToStringArray(index)); ActionListener l; if (query.isAggsOnly()) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilder.java index 8c79ae1ef0595..f09f543c6ffae 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilder.java @@ -21,23 +21,9 @@ public TableIdentifier visitTableIdentifier(TableIdentifierContext ctx) { ParseTree tree = ctx.name != null ? ctx.name : ctx.TABLE_IDENTIFIER(); String index = tree.getText(); - validateIndex(index, source); return new TableIdentifier(source, visitIdentifier(ctx.catalog), index); } - // see https://github.com/elastic/elasticsearch/issues/6736 - static void validateIndex(String index, Location source) { - for (int i = 0; i < index.length(); i++) { - char c = index.charAt(i); - if (Character.isUpperCase(c)) { - throw new ParsingException(source, "Invalid index name (needs to be lowercase) {}", index); - } - if (c == '\\' || c == '/' || c == '<' || c == '>' || c == '|' || c == ',' || c == ' ') { - throw new ParsingException(source, "Invalid index name (illegal character {}) {}", c, index); - } - } - } - @Override public String visitIdentifier(IdentifierContext ctx) { return ctx == null ? null : ctx.getText(); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilderTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilderTests.java deleted file mode 100644 index ec8b8abc51f2d..0000000000000 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilderTests.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
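With the identifier validation removed above, the table name is passed through as-is and split only where the indices are actually resolved: both IndexResolver and Querier now hand it to the request APIs via org.elasticsearch.common.Strings. A small sketch with a made-up identifier, the same include/exclude pattern the describeIncludeExclude spec added later in this commit uses:

    // Sketch: a multi-index table identifier becomes separate index expressions on the request.
    String tableIdentifier = "test_emp*,-test_alias*";   // enumeration, wildcard and exclusion
    String[] indices = Strings.commaDelimitedListToStringArray(tableIdentifier);
    // indices = { "test_emp*", "-test_alias*" }; wildcard/exclusion semantics are then applied
    // by normal index-name resolution, e.g. new GetIndexRequest().indices(indices)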
- */ -package org.elasticsearch.xpack.sql.parser; - -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.sql.tree.Location; - -import static org.hamcrest.Matchers.is; - -public class IdentifierBuilderTests extends ESTestCase { - - private static Location L = new Location(1, 10); - - public void testTypicalIndex() throws Exception { - IdentifierBuilder.validateIndex("some-index", L); - } - - public void testInternalIndex() throws Exception { - IdentifierBuilder.validateIndex(".some-internal-index-2020-02-02", L); - } - - public void testIndexPattern() throws Exception { - IdentifierBuilder.validateIndex(".some-*", L); - } - - public void testInvalidIndex() throws Exception { - ParsingException pe = expectThrows(ParsingException.class, () -> IdentifierBuilder.validateIndex("some,index", L)); - assertThat(pe.getMessage(), is("line 1:12: Invalid index name (illegal character ,) some,index")); - } - - public void testUpperCasedIndex() throws Exception { - ParsingException pe = expectThrows(ParsingException.class, () -> IdentifierBuilder.validateIndex("thisIsAnIndex", L)); - assertThat(pe.getMessage(), is("line 1:12: Invalid index name (needs to be lowercase) thisIsAnIndex")); - } -} diff --git a/x-pack/qa/sql/src/main/resources/command.csv-spec b/x-pack/qa/sql/src/main/resources/command.csv-spec index 89e86e887e140..a8f23e27ffac0 100644 --- a/x-pack/qa/sql/src/main/resources/command.csv-spec +++ b/x-pack/qa/sql/src/main/resources/command.csv-spec @@ -162,3 +162,27 @@ last_name | VARCHAR last_name.keyword | VARCHAR salary | INTEGER ; + + +describeIncludeExclude +DESCRIBE "test_emp*,-test_alias*"; + +column:s | type:s +birth_date | TIMESTAMP +dep | STRUCT +dep.dep_id | VARCHAR +dep.dep_name | VARCHAR +dep.dep_name.keyword | VARCHAR +dep.from_date | TIMESTAMP +dep.to_date | TIMESTAMP +emp_no | INTEGER +first_name | VARCHAR +first_name.keyword | VARCHAR +gender | VARCHAR +hire_date | TIMESTAMP +languages | TINYINT +last_name | VARCHAR +last_name.keyword | VARCHAR +salary | INTEGER +; + From e5eddc2f9e244424bb5b5d357153e4865f4a76dd Mon Sep 17 00:00:00 2001 From: David Roberts Date: Fri, 31 Aug 2018 08:48:45 +0100 Subject: [PATCH 32/52] [ML] Refactor delimited file structure detection (#33233) 1. Use the term "delimited" rather than "separated values" 2. Use a single factory class with arguments to specify the delimiter and identification constraints This change makes it easier to add support for other delimiter characters. --- .../CsvLogStructureFinderFactory.java | 35 ----- ....java => DelimitedLogStructureFinder.java} | 16 +-- .../DelimitedLogStructureFinderFactory.java | 57 ++++++++ .../ml/logstructurefinder/LogStructure.java | 124 ++++-------------- .../LogStructureFinderManager.java | 8 +- .../logstructurefinder/LogStructureUtils.java | 10 +- ...aratedValuesLogStructureFinderFactory.java | 38 ------ ...aratedValuesLogStructureFinderFactory.java | 37 ------ .../TsvLogStructureFinderFactory.java | 4 +- .../CsvLogStructureFinderFactoryTests.java | 38 ------ ...limitedLogStructureFinderFactoryTests.java | 93 +++++++++++++ ... 
=> DelimitedLogStructureFinderTests.java} | 124 +++++++++--------- .../JsonLogStructureFinderFactoryTests.java | 8 +- .../JsonLogStructureFinderTests.java | 2 +- .../LogStructureFinderManagerTests.java | 2 +- .../LogStructureTestCase.java | 4 +- .../logstructurefinder/LogStructureTests.java | 8 +- ...dValuesLogStructureFinderFactoryTests.java | 23 ---- ...dValuesLogStructureFinderFactoryTests.java | 28 ---- .../TextLogStructureFinderFactoryTests.java | 4 +- .../TextLogStructureFinderTests.java | 2 +- .../TsvLogStructureFinderFactoryTests.java | 33 ----- .../XmlLogStructureFinderFactoryTests.java | 8 +- .../XmlLogStructureFinderTests.java | 2 +- 24 files changed, 278 insertions(+), 430 deletions(-) delete mode 100644 x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/CsvLogStructureFinderFactory.java rename x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/{SeparatedValuesLogStructureFinder.java => DelimitedLogStructureFinder.java} (97%) create mode 100644 x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/DelimitedLogStructureFinderFactory.java delete mode 100644 x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/PipeSeparatedValuesLogStructureFinderFactory.java delete mode 100644 x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/SemiColonSeparatedValuesLogStructureFinderFactory.java delete mode 100644 x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/CsvLogStructureFinderFactoryTests.java create mode 100644 x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/DelimitedLogStructureFinderFactoryTests.java rename x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/{SeparatedValuesLogStructureFinderTests.java => DelimitedLogStructureFinderTests.java} (65%) delete mode 100644 x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/PipeSeparatedValuesLogStructureFinderFactoryTests.java delete mode 100644 x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/SemiColonSeparatedValuesLogStructureFinderFactoryTests.java delete mode 100644 x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TsvLogStructureFinderFactoryTests.java diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/CsvLogStructureFinderFactory.java b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/CsvLogStructureFinderFactory.java deleted file mode 100644 index cb9e6537252cd..0000000000000 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/CsvLogStructureFinderFactory.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.ml.logstructurefinder; - -import org.supercsv.prefs.CsvPreference; - -import java.io.IOException; -import java.util.List; - -public class CsvLogStructureFinderFactory implements LogStructureFinderFactory { - - /** - * Rules are: - * - The file must be valid CSV - * - It must contain at least two complete records - * - There must be at least two fields per record (otherwise files with no commas could be treated as CSV!) - * - Every CSV record except the last must have the same number of fields - * The reason the last record is allowed to have fewer fields than the others is that - * it could have been truncated when the file was sampled. - */ - @Override - public boolean canCreateFromSample(List explanation, String sample) { - return SeparatedValuesLogStructureFinder.canCreateFromSample(explanation, sample, 2, CsvPreference.EXCEL_PREFERENCE, "CSV"); - } - - @Override - public LogStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker) - throws IOException { - return SeparatedValuesLogStructureFinder.makeSeparatedValuesLogStructureFinder(explanation, sample, charsetName, hasByteOrderMarker, - CsvPreference.EXCEL_PREFERENCE, false); - } -} diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/SeparatedValuesLogStructureFinder.java b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/DelimitedLogStructureFinder.java similarity index 97% rename from x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/SeparatedValuesLogStructureFinder.java rename to x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/DelimitedLogStructureFinder.java index fd9d34096b2ed..2f7bb41d0bae7 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/SeparatedValuesLogStructureFinder.java +++ b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/DelimitedLogStructureFinder.java @@ -29,17 +29,16 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; -public class SeparatedValuesLogStructureFinder implements LogStructureFinder { +public class DelimitedLogStructureFinder implements LogStructureFinder { private static final int MAX_LEVENSHTEIN_COMPARISONS = 100; private final List sampleMessages; private final LogStructure structure; - static SeparatedValuesLogStructureFinder makeSeparatedValuesLogStructureFinder(List explanation, String sample, - String charsetName, Boolean hasByteOrderMarker, - CsvPreference csvPreference, boolean trimFields) - throws IOException { + static DelimitedLogStructureFinder makeDelimitedLogStructureFinder(List explanation, String sample, String charsetName, + Boolean hasByteOrderMarker, CsvPreference csvPreference, + boolean trimFields) throws IOException { Tuple>, List> parsed = readRows(sample, csvPreference); List> rows = parsed.v1(); @@ -73,13 +72,14 @@ static SeparatedValuesLogStructureFinder makeSeparatedValuesLogStructureFinder(L String preamble = Pattern.compile("\n").splitAsStream(sample).limit(lineNumbers.get(1)).collect(Collectors.joining("\n", "", "\n")); char delimiter = (char) csvPreference.getDelimiterChar(); - LogStructure.Builder structureBuilder = new LogStructure.Builder(LogStructure.Format.fromSeparator(delimiter)) + LogStructure.Builder structureBuilder = new 
LogStructure.Builder(LogStructure.Format.DELIMITED) .setCharset(charsetName) .setHasByteOrderMarker(hasByteOrderMarker) .setSampleStart(preamble) .setNumLinesAnalyzed(lineNumbers.get(lineNumbers.size() - 1)) .setNumMessagesAnalyzed(sampleRecords.size()) .setHasHeaderRow(isHeaderInFile) + .setDelimiter(delimiter) .setInputFields(Arrays.stream(headerWithNamedBlanks).collect(Collectors.toList())); if (trimFields) { @@ -131,10 +131,10 @@ static SeparatedValuesLogStructureFinder makeSeparatedValuesLogStructureFinder(L .setExplanation(explanation) .build(); - return new SeparatedValuesLogStructureFinder(sampleMessages, structure); + return new DelimitedLogStructureFinder(sampleMessages, structure); } - private SeparatedValuesLogStructureFinder(List sampleMessages, LogStructure structure) { + private DelimitedLogStructureFinder(List sampleMessages, LogStructure structure) { this.sampleMessages = Collections.unmodifiableList(sampleMessages); this.structure = structure; } diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/DelimitedLogStructureFinderFactory.java b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/DelimitedLogStructureFinderFactory.java new file mode 100644 index 0000000000000..3e4c3ea225cf8 --- /dev/null +++ b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/DelimitedLogStructureFinderFactory.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.logstructurefinder; + +import org.supercsv.prefs.CsvPreference; + +import java.io.IOException; +import java.util.List; +import java.util.Locale; + +public class DelimitedLogStructureFinderFactory implements LogStructureFinderFactory { + + private final CsvPreference csvPreference; + private final int minFieldsPerRow; + private final boolean trimFields; + + DelimitedLogStructureFinderFactory(char delimiter, int minFieldsPerRow, boolean trimFields) { + csvPreference = new CsvPreference.Builder('"', delimiter, "\n").build(); + this.minFieldsPerRow = minFieldsPerRow; + this.trimFields = trimFields; + } + + /** + * Rules are: + * - It must contain at least two complete records + * - There must be a minimum number of fields per record (otherwise files with no commas could be treated as CSV!) + * - Every record except the last must have the same number of fields + * The reason the last record is allowed to have fewer fields than the others is that + * it could have been truncated when the file was sampled. 
+ */ + @Override + public boolean canCreateFromSample(List explanation, String sample) { + String formatName; + switch ((char) csvPreference.getDelimiterChar()) { + case ',': + formatName = "CSV"; + break; + case '\t': + formatName = "TSV"; + break; + default: + formatName = Character.getName(csvPreference.getDelimiterChar()).toLowerCase(Locale.ROOT) + " delimited values"; + break; + } + return DelimitedLogStructureFinder.canCreateFromSample(explanation, sample, minFieldsPerRow, csvPreference, formatName); + } + + @Override + public LogStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker) + throws IOException { + return DelimitedLogStructureFinder.makeDelimitedLogStructureFinder(explanation, sample, charsetName, hasByteOrderMarker, + csvPreference, trimFields); + } +} diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructure.java b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructure.java index 64a00d20899c1..ea8fe37e62f9f 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructure.java +++ b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructure.java @@ -27,37 +27,14 @@ public class LogStructure implements ToXContentObject { public enum Format { - JSON, XML, CSV, TSV, SEMI_COLON_SEPARATED_VALUES, PIPE_SEPARATED_VALUES, SEMI_STRUCTURED_TEXT; - - public Character separator() { - switch (this) { - case JSON: - case XML: - return null; - case CSV: - return ','; - case TSV: - return '\t'; - case SEMI_COLON_SEPARATED_VALUES: - return ';'; - case PIPE_SEPARATED_VALUES: - return '|'; - case SEMI_STRUCTURED_TEXT: - return null; - default: - throw new IllegalStateException("enum value [" + this + "] missing from switch."); - } - } + JSON, XML, DELIMITED, SEMI_STRUCTURED_TEXT; public boolean supportsNesting() { switch (this) { case JSON: case XML: return true; - case CSV: - case TSV: - case SEMI_COLON_SEPARATED_VALUES: - case PIPE_SEPARATED_VALUES: + case DELIMITED: case SEMI_STRUCTURED_TEXT: return false; default: @@ -69,10 +46,7 @@ public boolean isStructured() { switch (this) { case JSON: case XML: - case CSV: - case TSV: - case SEMI_COLON_SEPARATED_VALUES: - case PIPE_SEPARATED_VALUES: + case DELIMITED: return true; case SEMI_STRUCTURED_TEXT: return false; @@ -85,10 +59,7 @@ public boolean isSemiStructured() { switch (this) { case JSON: case XML: - case CSV: - case TSV: - case SEMI_COLON_SEPARATED_VALUES: - case PIPE_SEPARATED_VALUES: + case DELIMITED: return false; case SEMI_STRUCTURED_TEXT: return true; @@ -97,38 +68,6 @@ public boolean isSemiStructured() { } } - public boolean isSeparatedValues() { - switch (this) { - case JSON: - case XML: - return false; - case CSV: - case TSV: - case SEMI_COLON_SEPARATED_VALUES: - case PIPE_SEPARATED_VALUES: - return true; - case SEMI_STRUCTURED_TEXT: - return false; - default: - throw new IllegalStateException("enum value [" + this + "] missing from switch."); - } - } - - public static Format fromSeparator(char separator) { - switch (separator) { - case ',': - return CSV; - case '\t': - return TSV; - case ';': - return SEMI_COLON_SEPARATED_VALUES; - case '|': - return PIPE_SEPARATED_VALUES; - default: - throw new IllegalArgumentException("No known format has separator [" + separator + "]"); - } - } - public static Format fromString(String name) { return 
valueOf(name.trim().toUpperCase(Locale.ROOT)); } @@ -149,7 +88,7 @@ public String toString() { static final ParseField EXCLUDE_LINES_PATTERN = new ParseField("exclude_lines_pattern"); static final ParseField INPUT_FIELDS = new ParseField("input_fields"); static final ParseField HAS_HEADER_ROW = new ParseField("has_header_row"); - static final ParseField SEPARATOR = new ParseField("separator"); + static final ParseField DELIMITER = new ParseField("delimiter"); static final ParseField SHOULD_TRIM_FIELDS = new ParseField("should_trim_fields"); static final ParseField GROK_PATTERN = new ParseField("grok_pattern"); static final ParseField TIMESTAMP_FIELD = new ParseField("timestamp_field"); @@ -171,7 +110,7 @@ public String toString() { PARSER.declareString(Builder::setExcludeLinesPattern, EXCLUDE_LINES_PATTERN); PARSER.declareStringArray(Builder::setInputFields, INPUT_FIELDS); PARSER.declareBoolean(Builder::setHasHeaderRow, HAS_HEADER_ROW); - PARSER.declareString((p, c) -> p.setSeparator(c.charAt(0)), SEPARATOR); + PARSER.declareString((p, c) -> p.setDelimiter(c.charAt(0)), DELIMITER); PARSER.declareBoolean(Builder::setShouldTrimFields, SHOULD_TRIM_FIELDS); PARSER.declareString(Builder::setGrokPattern, GROK_PATTERN); PARSER.declareString(Builder::setTimestampField, TIMESTAMP_FIELD); @@ -191,7 +130,7 @@ public String toString() { private final String excludeLinesPattern; private final List inputFields; private final Boolean hasHeaderRow; - private final Character separator; + private final Character delimiter; private final Boolean shouldTrimFields; private final String grokPattern; private final List timestampFormats; @@ -202,7 +141,7 @@ public String toString() { public LogStructure(int numLinesAnalyzed, int numMessagesAnalyzed, String sampleStart, String charset, Boolean hasByteOrderMarker, Format format, String multilineStartPattern, String excludeLinesPattern, List inputFields, - Boolean hasHeaderRow, Character separator, Boolean shouldTrimFields, String grokPattern, String timestampField, + Boolean hasHeaderRow, Character delimiter, Boolean shouldTrimFields, String grokPattern, String timestampField, List timestampFormats, boolean needClientTimezone, Map mappings, List explanation) { @@ -216,7 +155,7 @@ public LogStructure(int numLinesAnalyzed, int numMessagesAnalyzed, String sample this.excludeLinesPattern = excludeLinesPattern; this.inputFields = (inputFields == null) ? 
null : Collections.unmodifiableList(new ArrayList<>(inputFields)); this.hasHeaderRow = hasHeaderRow; - this.separator = separator; + this.delimiter = delimiter; this.shouldTrimFields = shouldTrimFields; this.grokPattern = grokPattern; this.timestampField = timestampField; @@ -266,8 +205,8 @@ public Boolean getHasHeaderRow() { return hasHeaderRow; } - public Character getSeparator() { - return separator; + public Character getDelimiter() { + return delimiter; } public Boolean getShouldTrimFields() { @@ -322,8 +261,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (hasHeaderRow != null) { builder.field(HAS_HEADER_ROW.getPreferredName(), hasHeaderRow.booleanValue()); } - if (separator != null) { - builder.field(SEPARATOR.getPreferredName(), String.valueOf(separator)); + if (delimiter != null) { + builder.field(DELIMITER.getPreferredName(), String.valueOf(delimiter)); } if (shouldTrimFields != null) { builder.field(SHOULD_TRIM_FIELDS.getPreferredName(), shouldTrimFields.booleanValue()); @@ -349,7 +288,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public int hashCode() { return Objects.hash(numLinesAnalyzed, numMessagesAnalyzed, sampleStart, charset, hasByteOrderMarker, format, - multilineStartPattern, excludeLinesPattern, inputFields, hasHeaderRow, separator, shouldTrimFields, grokPattern, timestampField, + multilineStartPattern, excludeLinesPattern, inputFields, hasHeaderRow, delimiter, shouldTrimFields, grokPattern, timestampField, timestampFormats, needClientTimezone, mappings, explanation); } @@ -376,7 +315,7 @@ public boolean equals(Object other) { Objects.equals(this.excludeLinesPattern, that.excludeLinesPattern) && Objects.equals(this.inputFields, that.inputFields) && Objects.equals(this.hasHeaderRow, that.hasHeaderRow) && - Objects.equals(this.separator, that.separator) && + Objects.equals(this.delimiter, that.delimiter) && Objects.equals(this.shouldTrimFields, that.shouldTrimFields) && Objects.equals(this.grokPattern, that.grokPattern) && Objects.equals(this.timestampField, that.timestampField) && @@ -397,7 +336,7 @@ public static class Builder { private String excludeLinesPattern; private List inputFields; private Boolean hasHeaderRow; - private Character separator; + private Character delimiter; private Boolean shouldTrimFields; private String grokPattern; private String timestampField; @@ -441,7 +380,6 @@ public Builder setHasByteOrderMarker(Boolean hasByteOrderMarker) { public Builder setFormat(Format format) { this.format = Objects.requireNonNull(format); - this.separator = format.separator(); return this; } @@ -465,13 +403,13 @@ public Builder setHasHeaderRow(Boolean hasHeaderRow) { return this; } - public Builder setShouldTrimFields(Boolean shouldTrimFields) { - this.shouldTrimFields = shouldTrimFields; + public Builder setDelimiter(Character delimiter) { + this.delimiter = delimiter; return this; } - public Builder setSeparator(Character separator) { - this.separator = separator; + public Builder setShouldTrimFields(Boolean shouldTrimFields) { + this.shouldTrimFields = shouldTrimFields; return this; } @@ -542,28 +480,22 @@ public LogStructure build() { if (hasHeaderRow != null) { throw new IllegalArgumentException("Has header row may not be specified for [" + format + "] structures."); } - if (separator != null) { - throw new IllegalArgumentException("Separator may not be specified for [" + format + "] structures."); + if (delimiter != null) { + throw new IllegalArgumentException("Delimiter may not 
be specified for [" + format + "] structures."); } if (grokPattern != null) { throw new IllegalArgumentException("Grok pattern may not be specified for [" + format + "] structures."); } break; - case CSV: - case TSV: - case SEMI_COLON_SEPARATED_VALUES: - case PIPE_SEPARATED_VALUES: + case DELIMITED: if (inputFields == null || inputFields.isEmpty()) { throw new IllegalArgumentException("Input fields must be specified for [" + format + "] structures."); } if (hasHeaderRow == null) { throw new IllegalArgumentException("Has header row must be specified for [" + format + "] structures."); } - Character expectedSeparator = format.separator(); - assert expectedSeparator != null; - if (expectedSeparator.equals(separator) == false) { - throw new IllegalArgumentException("Separator must be [" + expectedSeparator + "] for [" + format + - "] structures."); + if (delimiter == null) { + throw new IllegalArgumentException("Delimiter must be specified for [" + format + "] structures."); } if (grokPattern != null) { throw new IllegalArgumentException("Grok pattern may not be specified for [" + format + "] structures."); @@ -576,8 +508,8 @@ public LogStructure build() { if (hasHeaderRow != null) { throw new IllegalArgumentException("Has header row may not be specified for [" + format + "] structures."); } - if (separator != null) { - throw new IllegalArgumentException("Separator may not be specified for [" + format + "] structures."); + if (delimiter != null) { + throw new IllegalArgumentException("Delimiter may not be specified for [" + format + "] structures."); } if (shouldTrimFields != null) { throw new IllegalArgumentException("Should trim fields may not be specified for [" + format + "] structures."); @@ -607,7 +539,7 @@ public LogStructure build() { } return new LogStructure(numLinesAnalyzed, numMessagesAnalyzed, sampleStart, charset, hasByteOrderMarker, format, - multilineStartPattern, excludeLinesPattern, inputFields, hasHeaderRow, separator, shouldTrimFields, grokPattern, + multilineStartPattern, excludeLinesPattern, inputFields, hasHeaderRow, delimiter, shouldTrimFields, grokPattern, timestampField, timestampFormats, needClientTimezone, mappings, explanation); } } diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinderManager.java b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinderManager.java index a8fd9d7eb895b..e747a588dfd84 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinderManager.java +++ b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinderManager.java @@ -69,10 +69,10 @@ public final class LogStructureFinderManager { new JsonLogStructureFinderFactory(), new XmlLogStructureFinderFactory(), // ND-JSON will often also be valid (although utterly weird) CSV, so JSON must come before CSV - new CsvLogStructureFinderFactory(), - new TsvLogStructureFinderFactory(), - new SemiColonSeparatedValuesLogStructureFinderFactory(), - new PipeSeparatedValuesLogStructureFinderFactory(), + new DelimitedLogStructureFinderFactory(',', 2, false), + new DelimitedLogStructureFinderFactory('\t', 2, false), + new DelimitedLogStructureFinderFactory(';', 4, false), + new DelimitedLogStructureFinderFactory('|', 5, true), new TextLogStructureFinderFactory() )); diff --git 
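With the four specialised factories replaced by the parameterised registrations above, delimiter-specific behaviour is now just constructor arguments. A sketch of using the new factory directly, as it could appear in a test in the same package (the constructor is package-private; the sample text and charset are illustrative):

    // Sketch: CSV detection through the unified factory (',' delimiter, >= 2 fields, no trimming).
    static void detectCsv() throws IOException {
        List<String> explanation = new ArrayList<>();
        DelimitedLogStructureFinderFactory csvFactory = new DelimitedLogStructureFinderFactory(',', 2, false);
        String sample = "name,age\nalice,30\nbob,25\n";
        if (csvFactory.canCreateFromSample(explanation, sample)) {
            LogStructureFinder finder = csvFactory.createFromSample(explanation, sample, "UTF-8", false);
            // The resulting structure reports Format.DELIMITED with delimiter ',' rather than a CSV-specific format.
        }
    }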
a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureUtils.java b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureUtils.java index b1dfee22ee64a..71a68c399910b 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureUtils.java +++ b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureUtils.java @@ -21,12 +21,12 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -final class LogStructureUtils { +public final class LogStructureUtils { - static final String DEFAULT_TIMESTAMP_FIELD = "@timestamp"; - static final String MAPPING_TYPE_SETTING = "type"; - static final String MAPPING_FORMAT_SETTING = "format"; - static final String MAPPING_PROPERTIES_SETTING = "properties"; + public static final String DEFAULT_TIMESTAMP_FIELD = "@timestamp"; + public static final String MAPPING_TYPE_SETTING = "type"; + public static final String MAPPING_FORMAT_SETTING = "format"; + public static final String MAPPING_PROPERTIES_SETTING = "properties"; // NUMBER Grok pattern doesn't support scientific notation, so we extend it private static final Grok NUMBER_GROK = new Grok(Grok.getBuiltinPatterns(), "^%{NUMBER}(?:[eE][+-]?[0-3]?[0-9]{1,2})?$"); diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/PipeSeparatedValuesLogStructureFinderFactory.java b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/PipeSeparatedValuesLogStructureFinderFactory.java deleted file mode 100644 index 085599de847f0..0000000000000 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/PipeSeparatedValuesLogStructureFinderFactory.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.logstructurefinder; - -import org.supercsv.prefs.CsvPreference; - -import java.io.IOException; -import java.util.List; - -public class PipeSeparatedValuesLogStructureFinderFactory implements LogStructureFinderFactory { - - private static final CsvPreference PIPE_PREFERENCE = new CsvPreference.Builder('"', '|', "\n").build(); - - /** - * Rules are: - * - The file must be valid pipe (|) separated values - * - It must contain at least two complete records - * - There must be at least five fields per record (otherwise files with coincidental - * or no pipe characters could be treated as pipe separated) - * - Every pipe separated value record except the last must have the same number of fields - * The reason the last record is allowed to have fewer fields than the others is that - * it could have been truncated when the file was sampled. 
- */ - @Override - public boolean canCreateFromSample(List explanation, String sample) { - return SeparatedValuesLogStructureFinder.canCreateFromSample(explanation, sample, 5, PIPE_PREFERENCE, "pipe separated values"); - } - - @Override - public LogStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker) - throws IOException { - return SeparatedValuesLogStructureFinder.makeSeparatedValuesLogStructureFinder(explanation, sample, charsetName, hasByteOrderMarker, - PIPE_PREFERENCE, true); - } -} diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/SemiColonSeparatedValuesLogStructureFinderFactory.java b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/SemiColonSeparatedValuesLogStructureFinderFactory.java deleted file mode 100644 index e0e80fa7465ba..0000000000000 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/SemiColonSeparatedValuesLogStructureFinderFactory.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.logstructurefinder; - -import org.supercsv.prefs.CsvPreference; - -import java.io.IOException; -import java.util.List; - -public class SemiColonSeparatedValuesLogStructureFinderFactory implements LogStructureFinderFactory { - - /** - * Rules are: - * - The file must be valid semi-colon separated values - * - It must contain at least two complete records - * - There must be at least four fields per record (otherwise files with coincidental - * or no semi-colons could be treated as semi-colon separated) - * - Every semi-colon separated value record except the last must have the same number of fields - * The reason the last record is allowed to have fewer fields than the others is that - * it could have been truncated when the file was sampled. 
- */ - @Override - public boolean canCreateFromSample(List explanation, String sample) { - return SeparatedValuesLogStructureFinder.canCreateFromSample(explanation, sample, 4, - CsvPreference.EXCEL_NORTH_EUROPE_PREFERENCE, "semi-colon separated values"); - } - - @Override - public LogStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker) - throws IOException { - return SeparatedValuesLogStructureFinder.makeSeparatedValuesLogStructureFinder(explanation, sample, charsetName, hasByteOrderMarker, - CsvPreference.EXCEL_NORTH_EUROPE_PREFERENCE, false); - } -} diff --git a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/TsvLogStructureFinderFactory.java b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/TsvLogStructureFinderFactory.java index 733b32346fbed..1b53a33f31ee4 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/TsvLogStructureFinderFactory.java +++ b/x-pack/plugin/ml/log-structure-finder/src/main/java/org/elasticsearch/xpack/ml/logstructurefinder/TsvLogStructureFinderFactory.java @@ -23,13 +23,13 @@ public class TsvLogStructureFinderFactory implements LogStructureFinderFactory { */ @Override public boolean canCreateFromSample(List explanation, String sample) { - return SeparatedValuesLogStructureFinder.canCreateFromSample(explanation, sample, 2, CsvPreference.TAB_PREFERENCE, "TSV"); + return DelimitedLogStructureFinder.canCreateFromSample(explanation, sample, 2, CsvPreference.TAB_PREFERENCE, "TSV"); } @Override public LogStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker) throws IOException { - return SeparatedValuesLogStructureFinder.makeSeparatedValuesLogStructureFinder(explanation, sample, charsetName, hasByteOrderMarker, + return DelimitedLogStructureFinder.makeDelimitedLogStructureFinder(explanation, sample, charsetName, hasByteOrderMarker, CsvPreference.TAB_PREFERENCE, false); } } diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/CsvLogStructureFinderFactoryTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/CsvLogStructureFinderFactoryTests.java deleted file mode 100644 index f53ee008d691e..0000000000000 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/CsvLogStructureFinderFactoryTests.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.ml.logstructurefinder; - -public class CsvLogStructureFinderFactoryTests extends LogStructureTestCase { - - private LogStructureFinderFactory factory = new CsvLogStructureFinderFactory(); - - // No need to check JSON or XML because they come earlier in the order we check formats - - public void testCanCreateFromSampleGivenCsv() { - - assertTrue(factory.canCreateFromSample(explanation, CSV_SAMPLE)); - } - - public void testCanCreateFromSampleGivenTsv() { - - assertFalse(factory.canCreateFromSample(explanation, TSV_SAMPLE)); - } - - public void testCanCreateFromSampleGivenSemiColonSeparatedValues() { - - assertFalse(factory.canCreateFromSample(explanation, SEMI_COLON_SEPARATED_VALUES_SAMPLE)); - } - - public void testCanCreateFromSampleGivenPipeSeparatedValues() { - - assertFalse(factory.canCreateFromSample(explanation, PIPE_SEPARATED_VALUES_SAMPLE)); - } - - public void testCanCreateFromSampleGivenText() { - - assertFalse(factory.canCreateFromSample(explanation, TEXT_SAMPLE)); - } -} diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/DelimitedLogStructureFinderFactoryTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/DelimitedLogStructureFinderFactoryTests.java new file mode 100644 index 0000000000000..d9eadbc8f0fde --- /dev/null +++ b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/DelimitedLogStructureFinderFactoryTests.java @@ -0,0 +1,93 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.logstructurefinder; + +public class DelimitedLogStructureFinderFactoryTests extends LogStructureTestCase { + + private LogStructureFinderFactory csvFactory = new DelimitedLogStructureFinderFactory(',', 2, false); + private LogStructureFinderFactory tsvFactory = new DelimitedLogStructureFinderFactory('\t', 2, false); + private LogStructureFinderFactory semiColonDelimitedfactory = new DelimitedLogStructureFinderFactory(';', 4, false); + private LogStructureFinderFactory pipeDelimitedFactory = new DelimitedLogStructureFinderFactory('|', 5, true); + + // CSV - no need to check JSON or XML because they come earlier in the order we check formats + + public void testCanCreateCsvFromSampleGivenCsv() { + + assertTrue(csvFactory.canCreateFromSample(explanation, CSV_SAMPLE)); + } + + public void testCanCreateCsvFromSampleGivenTsv() { + + assertFalse(csvFactory.canCreateFromSample(explanation, TSV_SAMPLE)); + } + + public void testCanCreateCsvFromSampleGivenSemiColonDelimited() { + + assertFalse(csvFactory.canCreateFromSample(explanation, SEMI_COLON_DELIMITED_SAMPLE)); + } + + public void testCanCreateCsvFromSampleGivenPipeDelimited() { + + assertFalse(csvFactory.canCreateFromSample(explanation, PIPE_DELIMITED_SAMPLE)); + } + + public void testCanCreateCsvFromSampleGivenText() { + + assertFalse(csvFactory.canCreateFromSample(explanation, TEXT_SAMPLE)); + } + + // TSV - no need to check JSON, XML or CSV because they come earlier in the order we check formats + + public void testCanCreateTsvFromSampleGivenTsv() { + + assertTrue(tsvFactory.canCreateFromSample(explanation, TSV_SAMPLE)); + } + + public void testCanCreateTsvFromSampleGivenSemiColonDelimited() { + + assertFalse(tsvFactory.canCreateFromSample(explanation, SEMI_COLON_DELIMITED_SAMPLE)); + } + + public void testCanCreateTsvFromSampleGivenPipeDelimited() { + + assertFalse(tsvFactory.canCreateFromSample(explanation, PIPE_DELIMITED_SAMPLE)); + } + + public void testCanCreateTsvFromSampleGivenText() { + + assertFalse(tsvFactory.canCreateFromSample(explanation, TEXT_SAMPLE)); + } + + // Semi-colon delimited - no need to check JSON, XML, CSV or TSV because they come earlier in the order we check formats + + public void testCanCreateSemiColonDelimitedFromSampleGivenSemiColonDelimited() { + + assertTrue(semiColonDelimitedfactory.canCreateFromSample(explanation, SEMI_COLON_DELIMITED_SAMPLE)); + } + + public void testCanCreateSemiColonDelimitedFromSampleGivenPipeDelimited() { + + assertFalse(semiColonDelimitedfactory.canCreateFromSample(explanation, PIPE_DELIMITED_SAMPLE)); + } + + public void testCanCreateSemiColonDelimitedFromSampleGivenText() { + + assertFalse(semiColonDelimitedfactory.canCreateFromSample(explanation, TEXT_SAMPLE)); + } + + // Pipe delimited - no need to check JSON, XML, CSV, TSV or semi-colon delimited + // values because they come earlier in the order we check formats + + public void testCanCreatePipeDelimitedFromSampleGivenPipeDelimited() { + + assertTrue(pipeDelimitedFactory.canCreateFromSample(explanation, PIPE_DELIMITED_SAMPLE)); + } + + public void testCanCreatePipeDelimitedFromSampleGivenText() { + + assertFalse(pipeDelimitedFactory.canCreateFromSample(explanation, TEXT_SAMPLE)); + } +} diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/SeparatedValuesLogStructureFinderTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/DelimitedLogStructureFinderTests.java similarity 
index 65% rename from x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/SeparatedValuesLogStructureFinderTests.java rename to x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/DelimitedLogStructureFinderTests.java index b62832a0a19cb..57c297cf8d571 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/SeparatedValuesLogStructureFinderTests.java +++ b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/DelimitedLogStructureFinderTests.java @@ -12,27 +12,27 @@ import java.util.Arrays; import java.util.Collections; -import static org.elasticsearch.xpack.ml.logstructurefinder.SeparatedValuesLogStructureFinder.levenshteinFieldwiseCompareRows; -import static org.elasticsearch.xpack.ml.logstructurefinder.SeparatedValuesLogStructureFinder.levenshteinDistance; +import static org.elasticsearch.xpack.ml.logstructurefinder.DelimitedLogStructureFinder.levenshteinFieldwiseCompareRows; +import static org.elasticsearch.xpack.ml.logstructurefinder.DelimitedLogStructureFinder.levenshteinDistance; import static org.hamcrest.Matchers.arrayContaining; -public class SeparatedValuesLogStructureFinderTests extends LogStructureTestCase { +public class DelimitedLogStructureFinderTests extends LogStructureTestCase { - private LogStructureFinderFactory factory = new CsvLogStructureFinderFactory(); + private LogStructureFinderFactory csvFactory = new DelimitedLogStructureFinderFactory(',', 2, false); public void testCreateConfigsGivenCompleteCsv() throws Exception { String sample = "time,message\n" + "2018-05-17T13:41:23,hello\n" + "2018-05-17T13:41:32,hello again\n"; - assertTrue(factory.canCreateFromSample(explanation, sample)); + assertTrue(csvFactory.canCreateFromSample(explanation, sample)); String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); - LogStructureFinder structureFinder = factory.createFromSample(explanation, sample, charset, hasByteOrderMarker); + LogStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker); LogStructure structure = structureFinder.getStructure(); - assertEquals(LogStructure.Format.CSV, structure.getFormat()); + assertEquals(LogStructure.Format.DELIMITED, structure.getFormat()); assertEquals(charset, structure.getCharset()); if (hasByteOrderMarker == null) { assertNull(structure.getHasByteOrderMarker()); @@ -41,7 +41,7 @@ public void testCreateConfigsGivenCompleteCsv() throws Exception { } assertEquals("^\"?time\"?,\"?message\"?", structure.getExcludeLinesPattern()); assertEquals("^\"?\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); - assertEquals(Character.valueOf(','), structure.getSeparator()); + assertEquals(Character.valueOf(','), structure.getDelimiter()); assertTrue(structure.getHasHeaderRow()); assertNull(structure.getShouldTrimFields()); assertEquals(Arrays.asList("time", "message"), structure.getInputFields()); @@ -55,15 +55,15 @@ public void testCreateConfigsGivenCsvWithIncompleteLastRecord() throws Exception "\"hello\n" + "world\",2018-05-17T13:41:23,1\n" + "\"hello again\n"; // note that this last record is truncated - assertTrue(factory.canCreateFromSample(explanation, sample)); + assertTrue(csvFactory.canCreateFromSample(explanation, sample)); String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = 
randomHasByteOrderMarker(charset); - LogStructureFinder structureFinder = factory.createFromSample(explanation, sample, charset, hasByteOrderMarker); + LogStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker); LogStructure structure = structureFinder.getStructure(); - assertEquals(LogStructure.Format.CSV, structure.getFormat()); + assertEquals(LogStructure.Format.DELIMITED, structure.getFormat()); assertEquals(charset, structure.getCharset()); if (hasByteOrderMarker == null) { assertNull(structure.getHasByteOrderMarker()); @@ -72,7 +72,7 @@ public void testCreateConfigsGivenCsvWithIncompleteLastRecord() throws Exception } assertEquals("^\"?message\"?,\"?time\"?,\"?count\"?", structure.getExcludeLinesPattern()); assertEquals("^.*?,\"?\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); - assertEquals(Character.valueOf(','), structure.getSeparator()); + assertEquals(Character.valueOf(','), structure.getDelimiter()); assertTrue(structure.getHasHeaderRow()); assertNull(structure.getShouldTrimFields()); assertEquals(Arrays.asList("message", "time", "count"), structure.getInputFields()); @@ -88,15 +88,15 @@ public void testCreateConfigsGivenCsvWithTrailingNulls() throws Exception { "2,2016-12-31 15:15:01,2016-12-31 15:15:09,1,.00,1,N,264,264,2,1,0,0.5,0,0,0.3,1.8,,\n" + "1,2016-12-01 00:00:01,2016-12-01 00:10:22,1,1.60,1,N,163,143,2,9,0.5,0.5,0,0,0.3,10.3,,\n" + "1,2016-12-01 00:00:01,2016-12-01 00:11:01,1,1.40,1,N,164,229,1,9,0.5,0.5,2.05,0,0.3,12.35,,\n"; - assertTrue(factory.canCreateFromSample(explanation, sample)); + assertTrue(csvFactory.canCreateFromSample(explanation, sample)); String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); - LogStructureFinder structureFinder = factory.createFromSample(explanation, sample, charset, hasByteOrderMarker); + LogStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker); LogStructure structure = structureFinder.getStructure(); - assertEquals(LogStructure.Format.CSV, structure.getFormat()); + assertEquals(LogStructure.Format.DELIMITED, structure.getFormat()); assertEquals(charset, structure.getCharset()); if (hasByteOrderMarker == null) { assertNull(structure.getHasByteOrderMarker()); @@ -108,7 +108,7 @@ public void testCreateConfigsGivenCsvWithTrailingNulls() throws Exception { "\"?extra\"?,\"?mta_tax\"?,\"?tip_amount\"?,\"?tolls_amount\"?,\"?improvement_surcharge\"?,\"?total_amount\"?,\"?\"?,\"?\"?", structure.getExcludeLinesPattern()); assertEquals("^.*?,\"?\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); - assertEquals(Character.valueOf(','), structure.getSeparator()); + assertEquals(Character.valueOf(','), structure.getDelimiter()); assertTrue(structure.getHasHeaderRow()); assertNull(structure.getShouldTrimFields()); assertEquals(Arrays.asList("VendorID", "tpep_pickup_datetime", "tpep_dropoff_datetime", "passenger_count", "trip_distance", @@ -126,15 +126,15 @@ public void testCreateConfigsGivenCsvWithTrailingNullsExceptHeader() throws Exce "2,2016-12-31 15:15:01,2016-12-31 15:15:09,1,.00,1,N,264,264,2,1,0,0.5,0,0,0.3,1.8,,\n" + "1,2016-12-01 00:00:01,2016-12-01 00:10:22,1,1.60,1,N,163,143,2,9,0.5,0.5,0,0,0.3,10.3,,\n" + "1,2016-12-01 00:00:01,2016-12-01 00:11:01,1,1.40,1,N,164,229,1,9,0.5,0.5,2.05,0,0.3,12.35,,\n"; - assertTrue(factory.canCreateFromSample(explanation, sample)); + 
assertTrue(csvFactory.canCreateFromSample(explanation, sample)); String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); - LogStructureFinder structureFinder = factory.createFromSample(explanation, sample, charset, hasByteOrderMarker); + LogStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker); LogStructure structure = structureFinder.getStructure(); - assertEquals(LogStructure.Format.CSV, structure.getFormat()); + assertEquals(LogStructure.Format.DELIMITED, structure.getFormat()); assertEquals(charset, structure.getCharset()); if (hasByteOrderMarker == null) { assertNull(structure.getHasByteOrderMarker()); @@ -146,7 +146,7 @@ public void testCreateConfigsGivenCsvWithTrailingNullsExceptHeader() throws Exce "\"?extra\"?,\"?mta_tax\"?,\"?tip_amount\"?,\"?tolls_amount\"?,\"?improvement_surcharge\"?,\"?total_amount\"?", structure.getExcludeLinesPattern()); assertEquals("^.*?,\"?\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); - assertEquals(Character.valueOf(','), structure.getSeparator()); + assertEquals(Character.valueOf(','), structure.getDelimiter()); assertTrue(structure.getHasHeaderRow()); assertNull(structure.getShouldTrimFields()); assertEquals(Arrays.asList("VendorID", "tpep_pickup_datetime", "tpep_dropoff_datetime", "passenger_count", "trip_distance", @@ -161,15 +161,15 @@ public void testCreateConfigsGivenCsvWithTimeLastColumn() throws Exception { String sample = "\"pos_id\",\"trip_id\",\"latitude\",\"longitude\",\"altitude\",\"timestamp\"\n" + "\"1\",\"3\",\"4703.7815\",\"1527.4713\",\"359.9\",\"2017-01-19 16:19:04.742113\"\n" + "\"2\",\"3\",\"4703.7815\",\"1527.4714\",\"359.9\",\"2017-01-19 16:19:05.741890\"\n"; - assertTrue(factory.canCreateFromSample(explanation, sample)); + assertTrue(csvFactory.canCreateFromSample(explanation, sample)); String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); - LogStructureFinder structureFinder = factory.createFromSample(explanation, sample, charset, hasByteOrderMarker); + LogStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker); LogStructure structure = structureFinder.getStructure(); - assertEquals(LogStructure.Format.CSV, structure.getFormat()); + assertEquals(LogStructure.Format.DELIMITED, structure.getFormat()); assertEquals(charset, structure.getCharset()); if (hasByteOrderMarker == null) { assertNull(structure.getHasByteOrderMarker()); @@ -179,7 +179,7 @@ public void testCreateConfigsGivenCsvWithTimeLastColumn() throws Exception { assertEquals("^\"?pos_id\"?,\"?trip_id\"?,\"?latitude\"?,\"?longitude\"?,\"?altitude\"?,\"?timestamp\"?", structure.getExcludeLinesPattern()); assertNull(structure.getMultilineStartPattern()); - assertEquals(Character.valueOf(','), structure.getSeparator()); + assertEquals(Character.valueOf(','), structure.getDelimiter()); assertTrue(structure.getHasHeaderRow()); assertNull(structure.getShouldTrimFields()); assertEquals(Arrays.asList("pos_id", "trip_id", "latitude", "longitude", "altitude", "timestamp"), structure.getInputFields()); @@ -195,8 +195,8 @@ public void testFindHeaderFromSampleGivenHeaderInSample() throws IOException { "2014-06-23 00:00:01Z,JBU,877.5927,farequote\n" + "2014-06-23 00:00:01Z,KLM,1355.4812,farequote\n"; - Tuple header = SeparatedValuesLogStructureFinder.findHeaderFromSample(explanation, - 
SeparatedValuesLogStructureFinder.readRows(withHeader, CsvPreference.EXCEL_PREFERENCE).v1()); + Tuple header = DelimitedLogStructureFinder.findHeaderFromSample(explanation, + DelimitedLogStructureFinder.readRows(withHeader, CsvPreference.EXCEL_PREFERENCE).v1()); assertTrue(header.v1()); assertThat(header.v2(), arrayContaining("time", "airline", "responsetime", "sourcetype")); @@ -208,8 +208,8 @@ public void testFindHeaderFromSampleGivenHeaderNotInSample() throws IOException "2014-06-23 00:00:01Z,JBU,877.5927,farequote\n" + "2014-06-23 00:00:01Z,KLM,1355.4812,farequote\n"; - Tuple header = SeparatedValuesLogStructureFinder.findHeaderFromSample(explanation, - SeparatedValuesLogStructureFinder.readRows(withoutHeader, CsvPreference.EXCEL_PREFERENCE).v1()); + Tuple header = DelimitedLogStructureFinder.findHeaderFromSample(explanation, + DelimitedLogStructureFinder.readRows(withoutHeader, CsvPreference.EXCEL_PREFERENCE).v1()); assertFalse(header.v1()); assertThat(header.v2(), arrayContaining("column1", "column2", "column3", "column4")); @@ -251,43 +251,43 @@ public void testLevenshteinCompareRows() { public void testLineHasUnescapedQuote() { - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a,b,c", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a\",b,c", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a,b\",c", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a,b,c\"", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a,\"b\",c", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a,b,\"c\"", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a,\"b\"\"\",c", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a,b,\"c\"\"\"", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"\"\"a\",b,c", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a\"\"\",b,c", CsvPreference.EXCEL_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a,\"\"b\",c", CsvPreference.EXCEL_PREFERENCE)); - assertTrue(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("between\"words,b,c", CsvPreference.EXCEL_PREFERENCE)); - assertTrue(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("x and \"y\",b,c", CsvPreference.EXCEL_PREFERENCE)); - - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a\tb\tc", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a\"\tb\tc", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a\tb\"\tc", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a\tb\tc\"", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a\t\"b\"\tc", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a\tb\t\"c\"", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a\t\"b\"\"\"\tc", CsvPreference.TAB_PREFERENCE)); - 
assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("a\tb\t\"c\"\"\"", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"\"\"a\"\tb\tc", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a\"\"\"\tb\tc", CsvPreference.TAB_PREFERENCE)); - assertFalse(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("\"a\t\"\"b\"\tc", CsvPreference.TAB_PREFERENCE)); - assertTrue(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("between\"words\tb\tc", CsvPreference.TAB_PREFERENCE)); - assertTrue(SeparatedValuesLogStructureFinder.lineHasUnescapedQuote("x and \"y\"\tb\tc", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("a,b,c", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("\"a\",b,c", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("\"a,b\",c", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("\"a,b,c\"", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("a,\"b\",c", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("a,b,\"c\"", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("a,\"b\"\"\",c", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("a,b,\"c\"\"\"", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("\"\"\"a\",b,c", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("\"a\"\"\",b,c", CsvPreference.EXCEL_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("\"a,\"\"b\",c", CsvPreference.EXCEL_PREFERENCE)); + assertTrue(DelimitedLogStructureFinder.lineHasUnescapedQuote("between\"words,b,c", CsvPreference.EXCEL_PREFERENCE)); + assertTrue(DelimitedLogStructureFinder.lineHasUnescapedQuote("x and \"y\",b,c", CsvPreference.EXCEL_PREFERENCE)); + + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("a\tb\tc", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("\"a\"\tb\tc", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("\"a\tb\"\tc", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("\"a\tb\tc\"", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("a\t\"b\"\tc", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("a\tb\t\"c\"", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("a\t\"b\"\"\"\tc", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("a\tb\t\"c\"\"\"", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("\"\"\"a\"\tb\tc", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("\"a\"\"\"\tb\tc", CsvPreference.TAB_PREFERENCE)); + assertFalse(DelimitedLogStructureFinder.lineHasUnescapedQuote("\"a\t\"\"b\"\tc", CsvPreference.TAB_PREFERENCE)); + assertTrue(DelimitedLogStructureFinder.lineHasUnescapedQuote("between\"words\tb\tc", 
CsvPreference.TAB_PREFERENCE)); + assertTrue(DelimitedLogStructureFinder.lineHasUnescapedQuote("x and \"y\"\tb\tc", CsvPreference.TAB_PREFERENCE)); } public void testRowContainsDuplicateNonEmptyValues() { - assertFalse(SeparatedValuesLogStructureFinder.rowContainsDuplicateNonEmptyValues(Collections.singletonList("a"))); - assertFalse(SeparatedValuesLogStructureFinder.rowContainsDuplicateNonEmptyValues(Collections.singletonList(""))); - assertFalse(SeparatedValuesLogStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("a", "b", "c"))); - assertTrue(SeparatedValuesLogStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("a", "b", "a"))); - assertTrue(SeparatedValuesLogStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("a", "b", "b"))); - assertFalse(SeparatedValuesLogStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("a", "", ""))); - assertFalse(SeparatedValuesLogStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("", "a", ""))); + assertFalse(DelimitedLogStructureFinder.rowContainsDuplicateNonEmptyValues(Collections.singletonList("a"))); + assertFalse(DelimitedLogStructureFinder.rowContainsDuplicateNonEmptyValues(Collections.singletonList(""))); + assertFalse(DelimitedLogStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("a", "b", "c"))); + assertTrue(DelimitedLogStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("a", "b", "a"))); + assertTrue(DelimitedLogStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("a", "b", "b"))); + assertFalse(DelimitedLogStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("a", "", ""))); + assertFalse(DelimitedLogStructureFinder.rowContainsDuplicateNonEmptyValues(Arrays.asList("", "a", ""))); } } diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinderFactoryTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinderFactoryTests.java index 39ef3b9eedbba..cdbffa8259e0c 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinderFactoryTests.java +++ b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinderFactoryTests.java @@ -29,14 +29,14 @@ public void testCanCreateFromSampleGivenTsv() { assertFalse(factory.canCreateFromSample(explanation, TSV_SAMPLE)); } - public void testCanCreateFromSampleGivenSemiColonSeparatedValues() { + public void testCanCreateFromSampleGivenSemiColonDelimited() { - assertFalse(factory.canCreateFromSample(explanation, SEMI_COLON_SEPARATED_VALUES_SAMPLE)); + assertFalse(factory.canCreateFromSample(explanation, SEMI_COLON_DELIMITED_SAMPLE)); } - public void testCanCreateFromSampleGivenPipeSeparatedValues() { + public void testCanCreateFromSampleGivenPipeDelimited() { - assertFalse(factory.canCreateFromSample(explanation, PIPE_SEPARATED_VALUES_SAMPLE)); + assertFalse(factory.canCreateFromSample(explanation, PIPE_DELIMITED_SAMPLE)); } public void testCanCreateFromSampleGivenText() { diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinderTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinderTests.java index 2f727747bbff3..917054919dd50 100644 --- 
a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinderTests.java +++ b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/JsonLogStructureFinderTests.java @@ -29,7 +29,7 @@ public void testCreateConfigsGivenGoodJson() throws Exception { } assertNull(structure.getExcludeLinesPattern()); assertNull(structure.getMultilineStartPattern()); - assertNull(structure.getSeparator()); + assertNull(structure.getDelimiter()); assertNull(structure.getHasHeaderRow()); assertNull(structure.getShouldTrimFields()); assertNull(structure.getGrokPattern()); diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinderManagerTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinderManagerTests.java index 1f8691de8cf65..520a55510c7a4 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinderManagerTests.java +++ b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureFinderManagerTests.java @@ -61,7 +61,7 @@ public void testMakeBestStructureGivenXml() throws Exception { public void testMakeBestStructureGivenCsv() throws Exception { assertThat(structureFinderManager.makeBestStructureFinder(explanation, "time,message\n" + "2018-05-17T13:41:23,hello\n", StandardCharsets.UTF_8.name(), randomBoolean()), - instanceOf(SeparatedValuesLogStructureFinder.class)); + instanceOf(DelimitedLogStructureFinder.class)); } public void testMakeBestStructureGivenText() throws Exception { diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureTestCase.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureTestCase.java index 5f9a87ef2a7f2..6b718fef6c7ea 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureTestCase.java +++ b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureTestCase.java @@ -34,14 +34,14 @@ public abstract class LogStructureTestCase extends ESTestCase { "\"level\":\"INFO\",\"pid\":42,\"thread\":\"0x7fff7d2a8000\",\"message\":\"message 2\",\"class\":\"ml\"," + "\"method\":\"core::SomeNoiseMaker\",\"file\":\"Noisemaker.cc\",\"line\":333}\n"; - protected static final String PIPE_SEPARATED_VALUES_SAMPLE = "2018-01-06 16:56:14.295748|INFO |VirtualServer |1 |" + + protected static final String PIPE_DELIMITED_SAMPLE = "2018-01-06 16:56:14.295748|INFO |VirtualServer |1 |" + "listening on 0.0.0.0:9987, :::9987\n" + "2018-01-06 17:19:44.465252|INFO |VirtualServer |1 |client " + "'User1'(id:2) changed default admin channelgroup to 'Guest'(id:8)\n" + "2018-01-06 17:21:25.764368|INFO |VirtualServer |1 |client " + "'User1'(id:2) was added to channelgroup 'Channel Admin'(id:5) by client 'User1'(id:2) in channel 'Default Channel'(id:1)"; - protected static final String SEMI_COLON_SEPARATED_VALUES_SAMPLE = "\"pos_id\";\"trip_id\";\"latitude\";\"longitude\";\"altitude\";" + + protected static final String SEMI_COLON_DELIMITED_SAMPLE = "\"pos_id\";\"trip_id\";\"latitude\";\"longitude\";\"altitude\";" + "\"timestamp\"\n" + "\"1\";\"3\";\"4703.7815\";\"1527.4713\";\"359.9\";\"2017-01-19 16:19:04.742113\"\n" 
+ "\"2\";\"3\";\"4703.7815\";\"1527.4714\";\"359.9\";\"2017-01-19 16:19:05.741890\"\n" + diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureTests.java index 738928ed28a37..302946dcaa86c 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureTests.java +++ b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/LogStructureTests.java @@ -43,14 +43,12 @@ protected LogStructure createTestInstance() { builder.setExcludeLinesPattern(randomAlphaOfLength(100)); } - if (format.isSeparatedValues() || (format.supportsNesting() && randomBoolean())) { + if (format == LogStructure.Format.DELIMITED || (format.supportsNesting() && randomBoolean())) { builder.setInputFields(Arrays.asList(generateRandomStringArray(10, 10, false, false))); } - if (format.isSeparatedValues()) { + if (format == LogStructure.Format.DELIMITED) { builder.setHasHeaderRow(randomBoolean()); - if (rarely()) { - builder.setSeparator(format.separator()); - } + builder.setDelimiter(randomFrom(',', '\t', ';', '|')); } if (format.isSemiStructured()) { builder.setGrokPattern(randomAlphaOfLength(100)); diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/PipeSeparatedValuesLogStructureFinderFactoryTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/PipeSeparatedValuesLogStructureFinderFactoryTests.java deleted file mode 100644 index 3fd2fb7840ac9..0000000000000 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/PipeSeparatedValuesLogStructureFinderFactoryTests.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.logstructurefinder; - -public class PipeSeparatedValuesLogStructureFinderFactoryTests extends LogStructureTestCase { - - private LogStructureFinderFactory factory = new PipeSeparatedValuesLogStructureFinderFactory(); - - // No need to check JSON, XML, CSV, TSV or semi-colon separated values because they come earlier in the order we check formats - - public void testCanCreateFromSampleGivenPipeSeparatedValues() { - - assertTrue(factory.canCreateFromSample(explanation, PIPE_SEPARATED_VALUES_SAMPLE)); - } - - public void testCanCreateFromSampleGivenText() { - - assertFalse(factory.canCreateFromSample(explanation, TEXT_SAMPLE)); - } -} diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/SemiColonSeparatedValuesLogStructureFinderFactoryTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/SemiColonSeparatedValuesLogStructureFinderFactoryTests.java deleted file mode 100644 index 64dad7e078cdf..0000000000000 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/SemiColonSeparatedValuesLogStructureFinderFactoryTests.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.logstructurefinder; - -public class SemiColonSeparatedValuesLogStructureFinderFactoryTests extends LogStructureTestCase { - - private LogStructureFinderFactory factory = new SemiColonSeparatedValuesLogStructureFinderFactory(); - - // No need to check JSON, XML, CSV or TSV because they come earlier in the order we check formats - - public void testCanCreateFromSampleGivenSemiColonSeparatedValues() { - - assertTrue(factory.canCreateFromSample(explanation, SEMI_COLON_SEPARATED_VALUES_SAMPLE)); - } - - public void testCanCreateFromSampleGivenPipeSeparatedValues() { - - assertFalse(factory.canCreateFromSample(explanation, PIPE_SEPARATED_VALUES_SAMPLE)); - } - - public void testCanCreateFromSampleGivenText() { - - assertFalse(factory.canCreateFromSample(explanation, TEXT_SAMPLE)); - } -} diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinderFactoryTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinderFactoryTests.java index 267ce375d6e94..c1b30cc749612 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinderFactoryTests.java +++ b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinderFactoryTests.java @@ -9,8 +9,8 @@ public class TextLogStructureFinderFactoryTests extends LogStructureTestCase { private LogStructureFinderFactory factory = new TextLogStructureFinderFactory(); - // No need to check JSON, XML, CSV, TSV, semi-colon separated values or pipe - // separated values because they come earlier in the order we check formats + // No need to check JSON, XML, CSV, TSV, semi-colon delimited values or pipe + // delimited values because they come earlier in the order we check formats public void testCanCreateFromSampleGivenText() { diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinderTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinderTests.java index 7c6a58bb68387..c9e153a82c437 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinderTests.java +++ b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TextLogStructureFinderTests.java @@ -34,7 +34,7 @@ public void testCreateConfigsGivenElasticsearchLog() throws Exception { } assertNull(structure.getExcludeLinesPattern()); assertEquals("^\\[\\b\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}", structure.getMultilineStartPattern()); - assertNull(structure.getSeparator()); + assertNull(structure.getDelimiter()); assertNull(structure.getHasHeaderRow()); assertNull(structure.getShouldTrimFields()); assertEquals("\\[%{TIMESTAMP_ISO8601:timestamp}\\]\\[%{LOGLEVEL:loglevel} \\]\\[.*", structure.getGrokPattern()); diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TsvLogStructureFinderFactoryTests.java 
b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TsvLogStructureFinderFactoryTests.java deleted file mode 100644 index 1c8acc14d3288..0000000000000 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/TsvLogStructureFinderFactoryTests.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.logstructurefinder; - -public class TsvLogStructureFinderFactoryTests extends LogStructureTestCase { - - private LogStructureFinderFactory factory = new TsvLogStructureFinderFactory(); - - // No need to check JSON, XML or CSV because they come earlier in the order we check formats - - public void testCanCreateFromSampleGivenTsv() { - - assertTrue(factory.canCreateFromSample(explanation, TSV_SAMPLE)); - } - - public void testCanCreateFromSampleGivenSemiColonSeparatedValues() { - - assertFalse(factory.canCreateFromSample(explanation, SEMI_COLON_SEPARATED_VALUES_SAMPLE)); - } - - public void testCanCreateFromSampleGivenPipeSeparatedValues() { - - assertFalse(factory.canCreateFromSample(explanation, PIPE_SEPARATED_VALUES_SAMPLE)); - } - - public void testCanCreateFromSampleGivenText() { - - assertFalse(factory.canCreateFromSample(explanation, TEXT_SAMPLE)); - } -} diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinderFactoryTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinderFactoryTests.java index 27eb4ede040b0..b6dc3a56f1dfb 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinderFactoryTests.java +++ b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinderFactoryTests.java @@ -26,14 +26,14 @@ public void testCanCreateFromSampleGivenTsv() { assertFalse(factory.canCreateFromSample(explanation, TSV_SAMPLE)); } - public void testCanCreateFromSampleGivenSemiColonSeparatedValues() { + public void testCanCreateFromSampleGivenSemiColonDelimited() { - assertFalse(factory.canCreateFromSample(explanation, SEMI_COLON_SEPARATED_VALUES_SAMPLE)); + assertFalse(factory.canCreateFromSample(explanation, SEMI_COLON_DELIMITED_SAMPLE)); } - public void testCanCreateFromSampleGivenPipeSeparatedValues() { + public void testCanCreateFromSampleGivenPipeDelimited() { - assertFalse(factory.canCreateFromSample(explanation, PIPE_SEPARATED_VALUES_SAMPLE)); + assertFalse(factory.canCreateFromSample(explanation, PIPE_DELIMITED_SAMPLE)); } public void testCanCreateFromSampleGivenText() { diff --git a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinderTests.java b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinderTests.java index 0d04df152ef00..de653d7bcd0cd 100644 --- a/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinderTests.java +++ b/x-pack/plugin/ml/log-structure-finder/src/test/java/org/elasticsearch/xpack/ml/logstructurefinder/XmlLogStructureFinderTests.java @@ -29,7 +29,7 @@ 
public void testCreateConfigsGivenGoodXml() throws Exception { } assertNull(structure.getExcludeLinesPattern()); assertEquals("^\\s* Date: Fri, 31 Aug 2018 13:08:32 +0300 Subject: [PATCH 33/52] Different handling for security specific errors in the CLI. Fix for https://github.com/elastic/elasticsearch/issues/33230 (#33255) --- docs/reference/sql/endpoints/cli.asciidoc | 9 +++++++++ .../main/java/org/elasticsearch/xpack/sql/cli/Cli.java | 5 +++++ 2 files changed, 14 insertions(+) diff --git a/docs/reference/sql/endpoints/cli.asciidoc b/docs/reference/sql/endpoints/cli.asciidoc index 0908c2344bb15..eef2fbfbf5969 100644 --- a/docs/reference/sql/endpoints/cli.asciidoc +++ b/docs/reference/sql/endpoints/cli.asciidoc @@ -22,6 +22,15 @@ the first parameter: $ ./bin/elasticsearch-sql-cli https://some.server:9200 -------------------------------------------------- +If security is enabled on your cluster, you can pass the username +and password in the form `username:password@host_name:port` +to the SQL CLI: + +[source,bash] +-------------------------------------------------- +$ ./bin/elasticsearch-sql-cli https://sql_user:strongpassword@some.server:9200 +-------------------------------------------------- + Once the CLI is running you can use any <> that Elasticsearch supports: diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java index 357a4bcb5a770..6431f10a49217 100644 --- a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java @@ -27,6 +27,7 @@ import org.jline.terminal.TerminalBuilder; import java.io.IOException; import java.net.ConnectException; +import java.sql.SQLInvalidAuthorizationSpecException; import java.util.Arrays; import java.util.List; import java.util.logging.LogManager; @@ -139,6 +140,10 @@ private void checkConnection(CliSession cliSession, CliTerminal cliTerminal, Con // Most likely Elasticsearch is not running throw new UserException(ExitCodes.IO_ERROR, "Cannot connect to the server " + con.connectionString() + " - " + ex.getCause().getMessage()); + } else if (ex.getCause() != null && ex.getCause() instanceof SQLInvalidAuthorizationSpecException) { + throw new UserException(ExitCodes.NOPERM, + "Cannot establish a secure connection to the server " + + con.connectionString() + " - " + ex.getCause().getMessage()); } else { // Most likely we connected to something other than Elasticsearch throw new UserException(ExitCodes.DATA_ERROR, From cd378d5cc9ffe84705761b5c7c6f0b26215cdd36 Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Fri, 31 Aug 2018 16:01:54 +0300 Subject: [PATCH 34/52] Work around to be able to generate eclipse projects (#33295) * Work around to be able to generate eclipse projects https://github.com/gradle/gradle/issues/6582 --- build.gradle | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/build.gradle b/build.gradle index f8e2f312f18d5..c5b0a7e05ebcb 100644 --- a/build.gradle +++ b/build.gradle @@ -16,7 +16,9 @@ * specific language governing permissions and limitations * under the License. 
*/ + import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin +import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version @@ -24,14 +26,9 @@ import org.elasticsearch.gradle.VersionCollection import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.plugin.PluginBuildPlugin import org.gradle.plugins.ide.eclipse.model.SourceFolder -import org.gradle.util.GradleVersion -import org.gradle.util.DistributionLocator -import org.apache.tools.ant.taskdefs.condition.Os -import org.apache.tools.ant.filters.ReplaceTokens import java.nio.file.Files import java.nio.file.Path -import java.security.MessageDigest plugins { id 'com.gradle.build-scan' version '1.13.2' @@ -516,6 +513,16 @@ allprojects { tasks.cleanEclipse.dependsOn(wipeEclipseSettings) // otherwise the eclipse merging is *super confusing* tasks.eclipse.dependsOn(cleanEclipse, copyEclipseSettings) + + // work arround https://github.com/gradle/gradle/issues/6582 + tasks.eclipseProject.mustRunAfter tasks.cleanEclipseProject + tasks.matching { it.name == 'eclipseClasspath' }.all { + it.mustRunAfter { tasks.cleanEclipseClasspath } + } + tasks.matching { it.name == 'eclipseJdt' }.all { + it.mustRunAfter { tasks.cleanEclipseJdt } + } + tasks.copyEclipseSettings.mustRunAfter tasks.wipeEclipseSettings } allprojects { From 00b241a50167736e2296cd17cfe1a228017b6452 Mon Sep 17 00:00:00 2001 From: Andrei Stefan Date: Fri, 31 Aug 2018 16:12:01 +0300 Subject: [PATCH 35/52] SQL: test coverage for JdbcResultSet (#32813) * Tests for JdbcResultSet * Added VARCHAR conversion for different types * Made error messages consistent: they now contain both the type that fails to be converted and the value itself --- .../xpack/sql/jdbc/jdbc/JdbcResultSet.java | 65 +- .../xpack/sql/jdbc/jdbc/TypeConverter.java | 87 +- .../jdbc/jdbc/JdbcPreparedStatementTests.java | 42 +- .../qa/sql/nosecurity/JdbcResultSetIT.java | 16 + .../xpack/qa/sql/jdbc/ResultSetTestCase.java | 1522 ++++++++++++++++- .../qa/sql/jdbc/SimpleExampleTestCase.java | 3 +- 6 files changed, 1624 insertions(+), 111 deletions(-) create mode 100644 x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcResultSetIT.java diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSet.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSet.java index 201ae251ca0df..ebdeaef15cae4 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSet.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSet.java @@ -133,72 +133,37 @@ public String getString(int columnIndex) throws SQLException { @Override public boolean getBoolean(int columnIndex) throws SQLException { - Object val = column(columnIndex); - try { - return val != null ? (Boolean) val : false; - } catch (ClassCastException cce) { - throw new SQLException("unable to convert column " + columnIndex + " to a boolean", cce); - } + return column(columnIndex) != null ? getObject(columnIndex, Boolean.class) : false; } @Override public byte getByte(int columnIndex) throws SQLException { - Object val = column(columnIndex); - try { - return val != null ? 
((Number) val).byteValue() : 0; - } catch (ClassCastException cce) { - throw new SQLException("unable to convert column " + columnIndex + " to a byte", cce); - } + return column(columnIndex) != null ? getObject(columnIndex, Byte.class) : 0; } @Override public short getShort(int columnIndex) throws SQLException { - Object val = column(columnIndex); - try { - return val != null ? ((Number) val).shortValue() : 0; - } catch (ClassCastException cce) { - throw new SQLException("unable to convert column " + columnIndex + " to a short", cce); - } + return column(columnIndex) != null ? getObject(columnIndex, Short.class) : 0; } @Override public int getInt(int columnIndex) throws SQLException { - Object val = column(columnIndex); - try { - return val != null ? ((Number) val).intValue() : 0; - } catch (ClassCastException cce) { - throw new SQLException("unable to convert column " + columnIndex + " to an int", cce); - } + return column(columnIndex) != null ? getObject(columnIndex, Integer.class) : 0; } @Override public long getLong(int columnIndex) throws SQLException { - Object val = column(columnIndex); - try { - return val != null ? ((Number) val).longValue() : 0; - } catch (ClassCastException cce) { - throw new SQLException("unable to convert column " + columnIndex + " to a long", cce); - } + return column(columnIndex) != null ? getObject(columnIndex, Long.class) : 0; } @Override public float getFloat(int columnIndex) throws SQLException { - Object val = column(columnIndex); - try { - return val != null ? ((Number) val).floatValue() : 0; - } catch (ClassCastException cce) { - throw new SQLException("unable to convert column " + columnIndex + " to a float", cce); - } + return column(columnIndex) != null ? getObject(columnIndex, Float.class) : 0; } @Override public double getDouble(int columnIndex) throws SQLException { - Object val = column(columnIndex); - try { - return val != null ? ((Number) val).doubleValue() : 0; - } catch (ClassCastException cce) { - throw new SQLException("unable to convert column " + columnIndex + " to a double", cce); - } + return column(columnIndex) != null ? getObject(columnIndex, Double.class) : 0; } @Override @@ -272,15 +237,29 @@ public byte[] getBytes(String columnLabel) throws SQLException { @Override public Date getDate(String columnLabel) throws SQLException { + // TODO: the error message in case the value in the column cannot be converted to a Date refers to a column index + // (for example - "unable to convert column 4 to a long") and not to the column name, which is a bit confusing. + // Should we reconsider this? Maybe by catching the exception here and rethrowing it with the columnLabel instead. return getDate(column(columnLabel)); } private Long dateTime(int columnIndex) throws SQLException { Object val = column(columnIndex); + JDBCType type = cursor.columns().get(columnIndex - 1).type; try { + // TODO: the B6 appendix of the jdbc spec does mention CHAR, VARCHAR, LONGVARCHAR, DATE, TIMESTAMP as supported + // jdbc types that should be handled by getDate and getTime methods. From all of those we support VARCHAR and + // TIMESTAMP. Should we consider the VARCHAR conversion as a later enhancement? + if (JDBCType.TIMESTAMP.equals(type)) { + // the cursor can return an Integer if the date-since-epoch is small enough, XContentParser (Jackson) will + // return the "smallest" data type for numbers when parsing + // TODO: this should probably be handled server side + return val == null ? null : ((Number) val).longValue(); + }; return val == null ? 
null : (Long) val; } catch (ClassCastException cce) { - throw new SQLException("unable to convert column " + columnIndex + " to a long", cce); + throw new SQLException( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Long", val, type.getName()), cce); } } diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java index 3b5180b71f7c2..7b638d8bd094f 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java @@ -10,7 +10,6 @@ import java.sql.Date; import java.sql.JDBCType; -import java.sql.SQLDataException; import java.sql.SQLException; import java.sql.SQLFeatureNotSupportedException; import java.sql.Time; @@ -56,9 +55,10 @@ private TypeConverter() { } - private static final long DAY_IN_MILLIS = 60 * 60 * 24; + private static final long DAY_IN_MILLIS = 60 * 60 * 24 * 1000; private static final Map, JDBCType> javaToJDBC; + static { Map, JDBCType> aMap = Arrays.stream(DataType.values()) .filter(dataType -> dataType.javaClass() != null @@ -120,6 +120,7 @@ private static T dateTimeConvert(Long millis, Calendar c, Function T convert(Object val, JDBCType columnType, Class type) throws SQLE return (T) convert(val, columnType); } - if (type.isInstance(val)) { + // converting a Long to a Timestamp shouldn't be possible according to the spec, + // it feels a little brittle to check this scenario here and I don't particularly like it + // TODO: can we do any better or should we go over the spec and allow getLong(date) to be valid? + if (!(type == Long.class && columnType == JDBCType.TIMESTAMP) && type.isInstance(val)) { try { return type.cast(val); } catch (ClassCastException cce) { - throw new SQLDataException("Unable to convert " + val.getClass().getName() + " to " + columnType, cce); + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a %s", val, + columnType.getName(), type.getName()), cce); } } @@ -205,7 +210,8 @@ static T convert(Object val, JDBCType columnType, Class type) throws SQLE if (type == OffsetDateTime.class) { return (T) asOffsetDateTime(val, columnType); } - throw new SQLException("Conversion from type [" + columnType + "] to [" + type.getName() + "] not supported"); + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a %s", val, + columnType.getName(), type.getName())); } /** @@ -336,8 +342,11 @@ private static Boolean asBoolean(Object val, JDBCType columnType) throws SQLExce case FLOAT: case DOUBLE: return Boolean.valueOf(Integer.signum(((Number) val).intValue()) != 0); + case VARCHAR: + return Boolean.valueOf((String) val); default: - throw new SQLException("Conversion from type [" + columnType + "] to [Boolean] not supported"); + throw new SQLException( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Boolean", val, columnType.getName())); } } @@ -355,10 +364,16 @@ private static Byte asByte(Object val, JDBCType columnType) throws SQLException case FLOAT: case DOUBLE: return safeToByte(safeToLong(((Number) val).doubleValue())); + case VARCHAR: + try { + return Byte.valueOf((String) val); + } catch (NumberFormatException e) { + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Byte", val), e); + } default: } - 
throw new SQLException("Conversion from type [" + columnType + "] to [Byte] not supported"); + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Byte", val, columnType.getName())); } private static Short asShort(Object val, JDBCType columnType) throws SQLException { @@ -374,10 +389,16 @@ private static Short asShort(Object val, JDBCType columnType) throws SQLExceptio case FLOAT: case DOUBLE: return safeToShort(safeToLong(((Number) val).doubleValue())); + case VARCHAR: + try { + return Short.valueOf((String) val); + } catch (NumberFormatException e) { + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Short", val), e); + } default: } - throw new SQLException("Conversion from type [" + columnType + "] to [Short] not supported"); + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Short", val, columnType.getName())); } private static Integer asInteger(Object val, JDBCType columnType) throws SQLException { @@ -393,10 +414,18 @@ private static Integer asInteger(Object val, JDBCType columnType) throws SQLExce case FLOAT: case DOUBLE: return safeToInt(safeToLong(((Number) val).doubleValue())); + case VARCHAR: + try { + return Integer.valueOf((String) val); + } catch (NumberFormatException e) { + throw new SQLException( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to an Integer", val), e); + } default: } - throw new SQLException("Conversion from type [" + columnType + "] to [Integer] not supported"); + throw new SQLException( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to an Integer", val, columnType.getName())); } private static Long asLong(Object val, JDBCType columnType) throws SQLException { @@ -412,12 +441,21 @@ private static Long asLong(Object val, JDBCType columnType) throws SQLException case FLOAT: case DOUBLE: return safeToLong(((Number) val).doubleValue()); - case TIMESTAMP: - return ((Number) val).longValue(); + //TODO: should we support conversion to TIMESTAMP? 
+ //The spec says that getLong() should support the following types conversions: + //TINYINT, SMALLINT, INTEGER, BIGINT, REAL, FLOAT, DOUBLE, DECIMAL, NUMERIC, BIT, BOOLEAN, CHAR, VARCHAR, LONGVARCHAR + //case TIMESTAMP: + // return ((Number) val).longValue(); + case VARCHAR: + try { + return Long.valueOf((String) val); + } catch (NumberFormatException e) { + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Long", val), e); + } default: } - throw new SQLException("Conversion from type [" + columnType + "] to [Long] not supported"); + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Long", val, columnType.getName())); } private static Float asFloat(Object val, JDBCType columnType) throws SQLException { @@ -433,10 +471,16 @@ private static Float asFloat(Object val, JDBCType columnType) throws SQLExceptio case FLOAT: case DOUBLE: return Float.valueOf((((float) ((Number) val).doubleValue()))); + case VARCHAR: + try { + return Float.valueOf((String) val); + } catch (NumberFormatException e) { + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Float", val), e); + } default: } - throw new SQLException("Conversion from type [" + columnType + "] to [Float] not supported"); + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Float", val, columnType.getName())); } private static Double asDouble(Object val, JDBCType columnType) throws SQLException { @@ -451,32 +495,41 @@ private static Double asDouble(Object val, JDBCType columnType) throws SQLExcept case REAL: case FLOAT: case DOUBLE: + return Double.valueOf(((Number) val).doubleValue()); + case VARCHAR: + try { + return Double.valueOf((String) val); + } catch (NumberFormatException e) { + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Double", val), e); + } default: } - throw new SQLException("Conversion from type [" + columnType + "] to [Double] not supported"); + throw new SQLException( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Double", val, columnType.getName())); } private static Date asDate(Object val, JDBCType columnType) throws SQLException { if (columnType == JDBCType.TIMESTAMP) { return new Date(utcMillisRemoveTime(((Number) val).longValue())); } - throw new SQLException("Conversion from type [" + columnType + "] to [Date] not supported"); + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Date", val, columnType.getName())); } private static Time asTime(Object val, JDBCType columnType) throws SQLException { if (columnType == JDBCType.TIMESTAMP) { return new Time(utcMillisRemoveDate(((Number) val).longValue())); } - throw new SQLException("Conversion from type [" + columnType + "] to [Time] not supported"); + throw new SQLException(format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Time", val, columnType.getName())); } private static Timestamp asTimestamp(Object val, JDBCType columnType) throws SQLException { if (columnType == JDBCType.TIMESTAMP) { return new Timestamp(((Number) val).longValue()); } - throw new SQLException("Conversion from type [" + columnType + "] to [Timestamp] not supported"); + throw new SQLException( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Timestamp", val, columnType.getName())); } private static byte[] asByteArray(Object val, JDBCType columnType) 
{ diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcPreparedStatementTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcPreparedStatementTests.java index 9da06f6537c0e..35a3ec5748748 100644 --- a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcPreparedStatementTests.java +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcPreparedStatementTests.java @@ -25,6 +25,7 @@ import java.util.Locale; import java.util.Map; +import static java.lang.String.format; import static java.sql.JDBCType.BIGINT; import static java.sql.JDBCType.BOOLEAN; import static java.sql.JDBCType.DOUBLE; @@ -68,7 +69,7 @@ public void testThrownExceptionsWhenSettingBooleanValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, true, Types.TIMESTAMP)); - assertEquals("Conversion from type [BOOLEAN] to [Timestamp] not supported", sqle.getMessage()); + assertEquals("Unable to convert value [true] of type [BOOLEAN] to a Timestamp", sqle.getMessage()); } public void testSettingStringValues() throws SQLException { @@ -92,7 +93,7 @@ public void testThrownExceptionsWhenSettingStringValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, "foo bar", Types.INTEGER)); - assertEquals("Conversion from type [VARCHAR] to [Integer] not supported", sqle.getMessage()); + assertEquals("Unable to convert value [foo bar] of type [VARCHAR] to an Integer", sqle.getMessage()); } public void testSettingByteTypeValues() throws SQLException { @@ -128,7 +129,7 @@ public void testThrownExceptionsWhenSettingByteTypeValues() throws SQLException JdbcPreparedStatement jps = createJdbcPreparedStatement(); SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, (byte) 6, Types.TIMESTAMP)); - assertEquals("Conversion from type [TINYINT] to [Timestamp] not supported", sqle.getMessage()); + assertEquals("Unable to convert value [6] of type [TINYINT] to a Timestamp", sqle.getMessage()); } public void testSettingShortTypeValues() throws SQLException { @@ -161,7 +162,7 @@ public void testThrownExceptionsWhenSettingShortTypeValues() throws SQLException JdbcPreparedStatement jps = createJdbcPreparedStatement(); SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, (short) 6, Types.TIMESTAMP)); - assertEquals("Conversion from type [SMALLINT] to [Timestamp] not supported", sqle.getMessage()); + assertEquals("Unable to convert value [6] of type [SMALLINT] to a Timestamp", sqle.getMessage()); sqle = expectThrows(SQLException.class, () -> jps.setObject(1, 256, Types.TINYINT)); assertEquals("Numeric " + 256 + " out of range", sqle.getMessage()); @@ -195,7 +196,7 @@ public void testThrownExceptionsWhenSettingIntegerValues() throws SQLException { int someInt = randomInt(); SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, someInt, Types.TIMESTAMP)); - assertEquals("Conversion from type [INTEGER] to [Timestamp] not supported", sqle.getMessage()); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [INTEGER] to a Timestamp", someInt), sqle.getMessage()); Integer randomIntNotShort = randomIntBetween(32768, Integer.MAX_VALUE); sqle = expectThrows(SQLException.class, () -> jps.setObject(1, randomIntNotShort, Types.SMALLINT)); @@ 
-236,7 +237,7 @@ public void testThrownExceptionsWhenSettingLongValues() throws SQLException { long someLong = randomLong(); SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, someLong, Types.TIMESTAMP)); - assertEquals("Conversion from type [BIGINT] to [Timestamp] not supported", sqle.getMessage()); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [BIGINT] to a Timestamp", someLong), sqle.getMessage()); Long randomLongNotShort = randomLongBetween(Integer.MAX_VALUE + 1, Long.MAX_VALUE); sqle = expectThrows(SQLException.class, () -> jps.setObject(1, randomLongNotShort, Types.INTEGER)); @@ -277,7 +278,7 @@ public void testThrownExceptionsWhenSettingFloatValues() throws SQLException { float someFloat = randomFloat(); SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, someFloat, Types.TIMESTAMP)); - assertEquals("Conversion from type [REAL] to [Timestamp] not supported", sqle.getMessage()); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [REAL] to a Timestamp", someFloat), sqle.getMessage()); Float floatNotInt = 5_155_000_000f; sqle = expectThrows(SQLException.class, () -> jps.setObject(1, floatNotInt, Types.INTEGER)); @@ -316,7 +317,8 @@ public void testThrownExceptionsWhenSettingDoubleValues() throws SQLException { double someDouble = randomDouble(); SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, someDouble, Types.TIMESTAMP)); - assertEquals("Conversion from type [DOUBLE] to [Timestamp] not supported", sqle.getMessage()); + assertEquals( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [DOUBLE] to a Timestamp", someDouble), sqle.getMessage()); Double doubleNotInt = 5_155_000_000d; sqle = expectThrows(SQLException.class, () -> jps.setObject(1, doubleNotInt, Types.INTEGER)); @@ -361,7 +363,7 @@ public Object[] getAttributes() throws SQLException { public void testSettingTimestampValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - Timestamp someTimestamp = new Timestamp(randomMillisSinceEpoch()); + Timestamp someTimestamp = new Timestamp(randomLong()); jps.setTimestamp(1, someTimestamp); assertEquals(someTimestamp.getTime(), ((Date)value(jps)).getTime()); assertEquals(TIMESTAMP, jdbcType(jps)); @@ -372,7 +374,7 @@ public void testSettingTimestampValues() throws SQLException { assertEquals(1456708675000L, convertFromUTCtoCalendar(((Date)value(jps)), nonDefaultCal)); assertEquals(TIMESTAMP, jdbcType(jps)); - long beforeEpochTime = -randomMillisSinceEpoch(); + long beforeEpochTime = randomLongBetween(Long.MIN_VALUE, 0); jps.setTimestamp(1, new Timestamp(beforeEpochTime), nonDefaultCal); assertEquals(beforeEpochTime, convertFromUTCtoCalendar(((Date)value(jps)), nonDefaultCal)); assertTrue(value(jps) instanceof java.util.Date); @@ -384,7 +386,7 @@ public void testSettingTimestampValues() throws SQLException { public void testThrownExceptionsWhenSettingTimestampValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - Timestamp someTimestamp = new Timestamp(randomMillisSinceEpoch()); + Timestamp someTimestamp = new Timestamp(randomLong()); SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, someTimestamp, Types.INTEGER)); assertEquals("Conversion from type java.sql.Timestamp to INTEGER not supported", sqle.getMessage()); @@ -416,12 +418,12 @@ public void testThrownExceptionsWhenSettingTimeValues() throws SQLException { public void 
testSettingSqlDateValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - java.sql.Date someSqlDate = new java.sql.Date(randomMillisSinceEpoch()); + java.sql.Date someSqlDate = new java.sql.Date(randomLong()); jps.setDate(1, someSqlDate); assertEquals(someSqlDate.getTime(), ((Date)value(jps)).getTime()); assertEquals(TIMESTAMP, jdbcType(jps)); - someSqlDate = new java.sql.Date(randomMillisSinceEpoch()); + someSqlDate = new java.sql.Date(randomLong()); Calendar nonDefaultCal = randomCalendar(); jps.setDate(1, someSqlDate, nonDefaultCal); assertEquals(someSqlDate.getTime(), convertFromUTCtoCalendar(((Date)value(jps)), nonDefaultCal)); @@ -435,17 +437,17 @@ public void testSettingSqlDateValues() throws SQLException { public void testThrownExceptionsWhenSettingSqlDateValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - java.sql.Date someSqlDate = new java.sql.Date(randomMillisSinceEpoch()); + java.sql.Date someSqlDate = new java.sql.Date(randomLong()); SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, - () -> jps.setObject(1, new java.sql.Date(randomMillisSinceEpoch()), Types.DOUBLE)); + () -> jps.setObject(1, new java.sql.Date(randomLong()), Types.DOUBLE)); assertEquals("Conversion from type " + someSqlDate.getClass().getName() + " to DOUBLE not supported", sqle.getMessage()); } public void testSettingCalendarValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); Calendar someCalendar = randomCalendar(); - someCalendar.setTimeInMillis(randomMillisSinceEpoch()); + someCalendar.setTimeInMillis(randomLong()); jps.setObject(1, someCalendar); assertEquals(someCalendar.getTime(), (Date) value(jps)); @@ -472,7 +474,7 @@ public void testThrownExceptionsWhenSettingCalendarValues() throws SQLException public void testSettingDateValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - Date someDate = new Date(randomMillisSinceEpoch()); + Date someDate = new Date(randomLong()); jps.setObject(1, someDate); assertEquals(someDate, (Date) value(jps)); @@ -486,7 +488,7 @@ public void testSettingDateValues() throws SQLException { public void testThrownExceptionsWhenSettingDateValues() throws SQLException { JdbcPreparedStatement jps = createJdbcPreparedStatement(); - Date someDate = new Date(randomMillisSinceEpoch()); + Date someDate = new Date(randomLong()); SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, someDate, Types.BIGINT)); assertEquals("Conversion from type " + someDate.getClass().getName() + " to BIGINT not supported", sqle.getMessage()); @@ -549,10 +551,6 @@ public void testThrownExceptionsWhenSettingByteArrayValues() throws SQLException assertEquals("Conversion from type byte[] to DOUBLE not supported", sqle.getMessage()); } - private long randomMillisSinceEpoch() { - return randomLongBetween(0, System.currentTimeMillis()); - } - private JdbcPreparedStatement createJdbcPreparedStatement() throws SQLException { return new JdbcPreparedStatement(null, JdbcConfiguration.create("jdbc:es://l:1", null, 0), "?"); } diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcResultSetIT.java b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcResultSetIT.java new file mode 100644 index 0000000000000..30756a11f62ec --- /dev/null +++ 
b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcResultSetIT.java @@ -0,0 +1,16 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.jdbc.ResultSetTestCase; + +/* + * Integration testing class for the "no security" scenario (cluster running without the Security plugin, + * or with Security disabled). Runs all tests in the base class. + */ +public class JdbcResultSetIT extends ResultSetTestCase { +} diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ResultSetTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ResultSetTestCase.java index 861a6dccaba57..447fc4f17e182 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ResultSetTestCase.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ResultSetTestCase.java @@ -5,55 +5,1067 @@ */ package org.elasticsearch.xpack.qa.sql.jdbc; +import org.elasticsearch.client.Request; +import org.elasticsearch.common.CheckedBiFunction; +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration; +import org.elasticsearch.xpack.sql.jdbc.jdbcx.JdbcDataSource; +import org.elasticsearch.xpack.sql.type.DataType; + import java.io.IOException; +import java.io.InputStream; +import java.io.Reader; +import java.sql.Blob; +import java.sql.Clob; import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.JDBCType; +import java.sql.NClob; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; import java.sql.Timestamp; +import java.sql.Types; +import java.util.Arrays; +import java.util.Calendar; import java.util.Date; +import java.util.GregorianCalendar; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Locale; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Properties; +import java.util.Set; +import java.util.TimeZone; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static java.lang.String.format; +import static java.util.Calendar.DAY_OF_MONTH; +import static java.util.Calendar.ERA; +import static java.util.Calendar.HOUR_OF_DAY; +import static java.util.Calendar.MILLISECOND; +import static java.util.Calendar.MINUTE; +import static java.util.Calendar.MONTH; +import static java.util.Calendar.SECOND; +import static java.util.Calendar.YEAR; public class ResultSetTestCase extends JdbcIntegrationTestCase { - public void testGettingTimestamp() throws Exception { - long randomMillis = randomLongBetween(0, System.currentTimeMillis()); + + static final Set<String> fieldsNames = Stream.of("test_byte", "test_integer", "test_long", "test_short", "test_double", + "test_float", "test_keyword") + .collect(Collectors.toCollection(HashSet::new)); + static final Map<Tuple<String, Object>, JDBCType> dateTimeTestingFields = new HashMap<Tuple<String, Object>, JDBCType>(); + static final String SELECT_ALL_FIELDS = "SELECT test_boolean, test_byte, test_integer," + + "test_long, test_short, test_double, test_float, test_keyword, test_date FROM test"; + static final String SELECT_WILDCARD = "SELECT * FROM test"; + static { + dateTimeTestingFields.put(new Tuple<String, Object>("test_boolean", true), DataType.BOOLEAN.jdbcType); + dateTimeTestingFields.put(new Tuple<String, Object>("test_byte", 1), DataType.BYTE.jdbcType); + dateTimeTestingFields.put(new Tuple<String, Object>("test_integer", 1), DataType.INTEGER.jdbcType); + dateTimeTestingFields.put(new Tuple<String, Object>("test_long", 1L), DataType.LONG.jdbcType); + dateTimeTestingFields.put(new Tuple<String, Object>("test_short", 1), DataType.SHORT.jdbcType); + dateTimeTestingFields.put(new Tuple<String, Object>("test_double", 1d), DataType.DOUBLE.jdbcType); + dateTimeTestingFields.put(new Tuple<String, Object>("test_float", 1f), DataType.FLOAT.jdbcType); + dateTimeTestingFields.put(new Tuple<String, Object>("test_keyword", "true"), DataType.KEYWORD.jdbcType); + } + + // Byte values testing + public void testGettingValidByteWithoutCasting() throws Exception { + byte random1 = randomByte(); + byte random2 = randomValueOtherThan(random1, () -> randomByte()); + byte random3 = randomValueOtherThanMany(Arrays.asList(random1, random2)::contains, () -> randomByte()); + + createTestDataForByteValueTests(random1, random2, random3); + + doWithQuery("SELECT test_byte, test_null_byte, test_keyword FROM test", (results) -> { + ResultSetMetaData resultSetMetaData = results.getMetaData(); + + results.next(); + assertEquals(3, resultSetMetaData.getColumnCount()); + assertEquals(Types.TINYINT, resultSetMetaData.getColumnType(1)); + assertEquals(Types.TINYINT, resultSetMetaData.getColumnType(2)); + assertEquals(random1, results.getByte(1)); + assertEquals(random1, results.getByte("test_byte")); + assertEquals(random1, (byte) results.getObject("test_byte", Byte.class)); + assertTrue(results.getObject(1) instanceof Byte); + + assertEquals(0, results.getByte(2)); + assertTrue(results.wasNull()); + assertEquals(null, results.getObject("test_null_byte")); + assertTrue(results.wasNull()); + + assertTrue(results.next()); + assertEquals(random2, results.getByte(1)); + assertEquals(random2, results.getByte("test_byte")); + assertTrue(results.getObject(1) instanceof Byte); + assertEquals(random3, results.getByte("test_keyword")); + + assertFalse(results.next()); + }); + } + + public void testGettingValidByteWithCasting() throws Exception { + Map<String, Number> map = createTestDataForNumericValueTypes(() -> randomByte()); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + for(Entry<String, Number> e : map.entrySet()) { + byte actual = results.getObject(e.getKey(), Byte.class); + if (e.getValue() instanceof Double) { + assertEquals("For field " + e.getKey(), Math.round(e.getValue().doubleValue()), results.getByte(e.getKey())); + assertEquals("For field " + e.getKey(), Math.round(e.getValue().doubleValue()), actual); + } else if (e.getValue() instanceof Float) { + assertEquals("For field " + e.getKey(), Math.round(e.getValue().floatValue()), results.getByte(e.getKey())); + assertEquals("For field " + e.getKey(), Math.round(e.getValue().floatValue()), actual); + } else { + assertEquals("For field " + e.getKey(), e.getValue().byteValue(), results.getByte(e.getKey())); + assertEquals("For field " + e.getKey(), e.getValue().byteValue(), actual); + } + } + }); + } + + public void testGettingInvalidByte() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { +
builder.startObject("test_keyword").field("type", "keyword").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + + int intNotByte = randomIntBetween(Byte.MAX_VALUE + 1, Integer.MAX_VALUE); + long longNotByte = randomLongBetween(Byte.MAX_VALUE + 1, Long.MAX_VALUE); + short shortNotByte = (short) randomIntBetween(Byte.MAX_VALUE + 1, Short.MAX_VALUE); + double doubleNotByte = randomDoubleBetween(Byte.MAX_VALUE + 1, Double.MAX_VALUE, true); + float floatNotByte = randomFloatBetween(Byte.MAX_VALUE + 1, Float.MAX_VALUE); + String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); + long randomDate = randomLong(); + + String doubleErrorMessage = (doubleNotByte > Long.MAX_VALUE || doubleNotByte < Long.MIN_VALUE) ? + Double.toString(doubleNotByte) : Long.toString(Math.round(doubleNotByte)); + + index("test", "1", builder -> { + builder.field("test_integer", intNotByte); + builder.field("test_long", longNotByte); + builder.field("test_short", shortNotByte); + builder.field("test_double", doubleNotByte); + builder.field("test_float", floatNotByte); + builder.field("test_keyword", randomString); + builder.field("test_date", randomDate); + }); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + + SQLException sqle = expectThrows(SQLException.class, () -> results.getByte("test_integer")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", intNotByte), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_integer", Byte.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", intNotByte), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getByte("test_short")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", shortNotByte), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_short", Byte.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", shortNotByte), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getByte("test_long")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Long.toString(longNotByte)), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_long", Byte.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Long.toString(longNotByte)), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getByte("test_double")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", doubleErrorMessage), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_double", Byte.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", doubleErrorMessage), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getByte("test_float")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(floatNotByte)), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_float", Byte.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(floatNotByte)), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getByte("test_keyword")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Byte", randomString), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_keyword", Byte.class)); + assertEquals(format(Locale.ROOT, 
"Unable to convert value [%.128s] of type [VARCHAR] to a Byte", randomString), + sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getByte("test_date")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Byte", randomDate), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Byte.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Byte", randomDate), + sqle.getMessage()); + }); + } + + // Short values testing + public void testGettingValidShortWithoutCasting() throws Exception { + short random1 = randomShort(); + short random2 = randomValueOtherThan(random1, () -> randomShort()); + short random3 = randomValueOtherThanMany(Arrays.asList(random1, random2)::contains, () -> randomShort()); + + createTestDataForShortValueTests(random1, random2, random3); + + doWithQuery("SELECT test_short, test_null_short, test_keyword FROM test", (results) -> { + ResultSetMetaData resultSetMetaData = results.getMetaData(); + + results.next(); + assertEquals(3, resultSetMetaData.getColumnCount()); + assertEquals(Types.SMALLINT, resultSetMetaData.getColumnType(1)); + assertEquals(Types.SMALLINT, resultSetMetaData.getColumnType(2)); + assertEquals(random1, results.getShort(1)); + assertEquals(random1, results.getShort("test_short")); + assertEquals(random1, results.getObject("test_short")); + assertTrue(results.getObject(1) instanceof Short); + + assertEquals(0, results.getShort(2)); + assertTrue(results.wasNull()); + assertEquals(null, results.getObject("test_null_short")); + assertTrue(results.wasNull()); + + assertTrue(results.next()); + assertEquals(random2, results.getShort(1)); + assertEquals(random2, results.getShort("test_short")); + assertTrue(results.getObject(1) instanceof Short); + assertEquals(random3, results.getShort("test_keyword")); + + assertFalse(results.next()); + }); + } + + public void testGettingValidShortWithCasting() throws Exception { + Map map = createTestDataForNumericValueTypes(() -> randomShort()); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + for(Entry e : map.entrySet()) { + short actual = (short) results.getObject(e.getKey(), Short.class); + if (e.getValue() instanceof Double) { + assertEquals("For field " + e.getKey(), Math.round(e.getValue().doubleValue()), results.getShort(e.getKey())); + assertEquals("For field " + e.getKey(), Math.round(e.getValue().doubleValue()), actual); + } else if (e.getValue() instanceof Float) { + assertEquals("For field " + e.getKey(), Math.round(e.getValue().floatValue()), results.getShort(e.getKey())); + assertEquals("For field " + e.getKey(), Math.round(e.getValue().floatValue()), actual); + } else { + assertEquals("For field " + e.getKey(), + e.getValue().shortValue(), results.getShort(e.getKey())); + assertEquals("For field " + e.getKey(), e.getValue().shortValue(), actual); + } + } + }); + } + + public void testGettingInvalidShort() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_keyword").field("type", "keyword").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + + int intNotShort = randomIntBetween(Short.MAX_VALUE + 1, Integer.MAX_VALUE); + long longNotShort = randomLongBetween(Short.MAX_VALUE + 1, Long.MAX_VALUE); + double doubleNotShort = randomDoubleBetween(Short.MAX_VALUE + 1, Double.MAX_VALUE, true); + 
float floatNotShort = randomFloatBetween(Short.MAX_VALUE + 1, Float.MAX_VALUE); + String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); + long randomDate = randomLong(); + + String doubleErrorMessage = (doubleNotShort > Long.MAX_VALUE || doubleNotShort < Long.MIN_VALUE) ? + Double.toString(doubleNotShort) : Long.toString(Math.round(doubleNotShort)); + + index("test", "1", builder -> { + builder.field("test_integer", intNotShort); + builder.field("test_long", longNotShort); + builder.field("test_double", doubleNotShort); + builder.field("test_float", floatNotShort); + builder.field("test_keyword", randomString); + builder.field("test_date", randomDate); + }); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + + SQLException sqle = expectThrows(SQLException.class, () -> results.getShort("test_integer")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", intNotShort), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_integer", Short.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", intNotShort), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getShort("test_long")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Long.toString(longNotShort)), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_long", Short.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Long.toString(longNotShort)), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getShort("test_double")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", doubleErrorMessage), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_double", Short.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", doubleErrorMessage), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getShort("test_float")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(floatNotShort)), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_float", Short.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(floatNotShort)), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getShort("test_keyword")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Short", randomString), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_keyword", Short.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Short", randomString), + sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getShort("test_date")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Short", randomDate), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Short.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Short", randomDate), + sqle.getMessage()); + }); + } + + // Integer values testing + public void testGettingValidIntegerWithoutCasting() throws Exception { + int random1 = randomInt(); + int random2 = randomValueOtherThan(random1, () -> randomInt()); + int random3 = randomValueOtherThanMany(Arrays.asList(random1, random2)::contains, () -> randomInt()); + + 
createTestDataForIntegerValueTests(random1, random2, random3); + + doWithQuery("SELECT test_integer,test_null_integer,test_keyword FROM test", (results) -> { + ResultSetMetaData resultSetMetaData = results.getMetaData(); + + results.next(); + assertEquals(3, resultSetMetaData.getColumnCount()); + assertEquals(Types.INTEGER, resultSetMetaData.getColumnType(1)); + assertEquals(Types.INTEGER, resultSetMetaData.getColumnType(2)); + assertEquals(random1, results.getInt(1)); + assertEquals(random1, results.getInt("test_integer")); + assertEquals(random1, (int) results.getObject("test_integer", Integer.class)); + assertTrue(results.getObject(1) instanceof Integer); + + assertEquals(0, results.getInt(2)); + assertTrue(results.wasNull()); + assertEquals(null, results.getObject("test_null_integer")); + assertTrue(results.wasNull()); + + assertTrue(results.next()); + assertEquals(random2, results.getInt(1)); + assertEquals(random2, results.getInt("test_integer")); + assertTrue(results.getObject(1) instanceof Integer); + assertEquals(random3, results.getInt("test_keyword")); + + assertFalse(results.next()); + }); + } + + public void testGettingValidIntegerWithCasting() throws Exception { + Map map = createTestDataForNumericValueTypes(() -> randomInt()); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + for(Entry e : map.entrySet()) { + int actual = results.getObject(e.getKey(), Integer.class); + if (e.getValue() instanceof Double) { + assertEquals("For field " + e.getKey(), Math.round(e.getValue().doubleValue()), results.getInt(e.getKey())); + assertEquals("For field " + e.getKey(), Math.round(e.getValue().doubleValue()), actual); + } else if (e.getValue() instanceof Float) { + assertEquals("For field " + e.getKey(), Math.round(e.getValue().floatValue()), results.getInt(e.getKey())); + assertEquals("For field " + e.getKey(), Math.round(e.getValue().floatValue()), actual); + } else { + assertEquals("For field " + e.getKey(), e.getValue().intValue(), results.getInt(e.getKey())); + assertEquals("For field " + e.getKey(), e.getValue().intValue(), actual); + } + } + }); + } + + public void testGettingInvalidInteger() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_keyword").field("type", "keyword").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + + long longNotInt = randomLongBetween(getMaxIntPlusOne(), Long.MAX_VALUE); + double doubleNotInt = randomDoubleBetween(getMaxIntPlusOne().doubleValue(), Double.MAX_VALUE, true); + float floatNotInt = randomFloatBetween(getMaxIntPlusOne().floatValue(), Float.MAX_VALUE); + String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); + long randomDate = randomLong(); + + String doubleErrorMessage = (doubleNotInt > Long.MAX_VALUE || doubleNotInt < Long.MIN_VALUE) ? 
+ Double.toString(doubleNotInt) : Long.toString(Math.round(doubleNotInt)); + + index("test", "1", builder -> { + builder.field("test_long", longNotInt); + builder.field("test_double", doubleNotInt); + builder.field("test_float", floatNotInt); + builder.field("test_keyword", randomString); + builder.field("test_date", randomDate); + }); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + + SQLException sqle = expectThrows(SQLException.class, () -> results.getInt("test_long")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Long.toString(longNotInt)), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_long", Integer.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Long.toString(longNotInt)), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getInt("test_double")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", doubleErrorMessage), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_double", Integer.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", doubleErrorMessage), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getInt("test_float")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(floatNotInt)), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_float", Integer.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(floatNotInt)), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getInt("test_keyword")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to an Integer", randomString), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_keyword", Integer.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to an Integer", randomString), + sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getInt("test_date")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to an Integer", randomDate), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Integer.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to an Integer", randomDate), + sqle.getMessage()); + }); + } + + // Long values testing + public void testGettingValidLongWithoutCasting() throws Exception { + long random1 = randomLong(); + long random2 = randomValueOtherThan(random1, () -> randomLong()); + long random3 = randomValueOtherThanMany(Arrays.asList(random1, random2)::contains, () -> randomLong()); + + createTestDataForLongValueTests(random1, random2, random3); + + doWithQuery("SELECT test_long, test_null_long, test_keyword FROM test", (results) -> { + ResultSetMetaData resultSetMetaData = results.getMetaData(); + + results.next(); + assertEquals(3, resultSetMetaData.getColumnCount()); + assertEquals(Types.BIGINT, resultSetMetaData.getColumnType(1)); + assertEquals(Types.BIGINT, resultSetMetaData.getColumnType(2)); + assertEquals(random1, results.getLong(1)); + assertEquals(random1, results.getLong("test_long")); + assertEquals(random1, (long) results.getObject("test_long", Long.class)); + assertTrue(results.getObject(1) instanceof Long); + + assertEquals(0, results.getLong(2)); + 
assertTrue(results.wasNull()); + assertEquals(null, results.getObject("test_null_long")); + assertTrue(results.wasNull()); + + assertTrue(results.next()); + assertEquals(random2, results.getLong(1)); + assertEquals(random2, results.getLong("test_long")); + assertTrue(results.getObject(1) instanceof Long); + assertEquals(random3, results.getLong("test_keyword")); + + assertFalse(results.next()); + }); + } + + public void testGettingValidLongWithCasting() throws Exception { + Map map = createTestDataForNumericValueTypes(() -> randomLong()); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + for(Entry e : map.entrySet()) { + long actual = results.getObject(e.getKey(), Long.class); + if (e.getValue() instanceof Double || e.getValue() instanceof Float) { + assertEquals("For field " + e.getKey(), Math.round(e.getValue().doubleValue()), results.getLong(e.getKey())); + assertEquals("For field " + e.getKey(), Math.round(e.getValue().doubleValue()), actual); + } else { + assertEquals("For field " + e.getKey(), e.getValue().longValue(), results.getLong(e.getKey())); + assertEquals("For field " + e.getKey(), e.getValue().longValue(), actual); + } + } + }); + } + + public void testGettingInvalidLong() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_keyword").field("type", "keyword").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + + double doubleNotLong = randomDoubleBetween(getMaxLongPlusOne().doubleValue(), Double.MAX_VALUE, true); + float floatNotLong = randomFloatBetween(getMaxLongPlusOne().floatValue(), Float.MAX_VALUE); + String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); + long randomDate = randomLong(); + + index("test", "1", builder -> { + builder.field("test_double", doubleNotLong); + builder.field("test_float", floatNotLong); + builder.field("test_keyword", randomString); + builder.field("test_date", randomDate); + }); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + + SQLException sqle = expectThrows(SQLException.class, () -> results.getLong("test_double")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(doubleNotLong)), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_double", Long.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(doubleNotLong)), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getLong("test_float")); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(floatNotLong)), sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_float", Long.class)); + assertEquals(format(Locale.ROOT, "Numeric %s out of range", Double.toString(floatNotLong)), sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getLong("test_keyword")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Long", randomString), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_keyword", Long.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Long", randomString), + sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getLong("test_date")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a 
Long", randomDate), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Long.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Long", randomDate), + sqle.getMessage()); + }); + } + + // Double values testing + public void testGettingValidDoubleWithoutCasting() throws Exception { + double random1 = randomDouble(); + double random2 = randomValueOtherThan(random1, () -> randomDouble()); + double random3 = randomValueOtherThanMany(Arrays.asList(random1, random2)::contains, () -> randomDouble()); + + createTestDataForDoubleValueTests(random1, random2, random3); + + doWithQuery("SELECT test_double, test_null_double, test_keyword FROM test", (results) -> { + ResultSetMetaData resultSetMetaData = results.getMetaData(); + + results.next(); + assertEquals(3, resultSetMetaData.getColumnCount()); + assertEquals(Types.DOUBLE, resultSetMetaData.getColumnType(1)); + assertEquals(Types.DOUBLE, resultSetMetaData.getColumnType(2)); + assertEquals(random1, results.getDouble(1), 0.0d); + assertEquals(random1, results.getDouble("test_double"), 0.0d); + assertEquals(random1, results.getObject("test_double", Double.class), 0.0d); + assertTrue(results.getObject(1) instanceof Double); + + assertEquals(0, results.getDouble(2), 0.0d); + assertTrue(results.wasNull()); + assertEquals(null, results.getObject("test_null_double")); + assertTrue(results.wasNull()); + + assertTrue(results.next()); + assertEquals(random2, results.getDouble(1), 0.0d); + assertEquals(random2, results.getDouble("test_double"), 0.0d); + assertTrue(results.getObject(1) instanceof Double); + assertEquals(random3, results.getDouble("test_keyword"), 0.0d); + + assertFalse(results.next()); + }); + } + + public void testGettingValidDoubleWithCasting() throws Exception { + Map map = createTestDataForNumericValueTypes(() -> randomDouble()); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + for(Entry e : map.entrySet()) { + assertEquals("For field " + e.getKey(), e.getValue().doubleValue(), results.getDouble(e.getKey()), 0.0d); + assertEquals("For field " + e.getKey(), + e.getValue().doubleValue(), results.getObject(e.getKey(), Double.class), 0.0d); + } + }); + } + + public void testGettingInvalidDouble() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_keyword").field("type", "keyword").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + + String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); + long randomDate = randomLong(); + + index("test", "1", builder -> { + builder.field("test_keyword", randomString); + builder.field("test_date", randomDate); + }); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + + SQLException sqle = expectThrows(SQLException.class, () -> results.getDouble("test_keyword")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Double", randomString), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_keyword", Double.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Double", randomString), + sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getDouble("test_date")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Double", randomDate), 
+ sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Double.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Double", randomDate), + sqle.getMessage()); + }); + } + + // Float values testing + public void testGettingValidFloatWithoutCasting() throws Exception { + float random1 = randomFloat(); + float random2 = randomValueOtherThan(random1, () -> randomFloat()); + float random3 = randomValueOtherThanMany(Arrays.asList(random1, random2)::contains, () -> randomFloat()); + + createTestDataForFloatValueTests(random1, random2, random3); + + doWithQuery("SELECT test_float, test_null_float, test_keyword FROM test", (results) -> { + ResultSetMetaData resultSetMetaData = results.getMetaData(); + + results.next(); + assertEquals(3, resultSetMetaData.getColumnCount()); + assertEquals(Types.REAL, resultSetMetaData.getColumnType(1)); + assertEquals(Types.REAL, resultSetMetaData.getColumnType(2)); + assertEquals(random1, results.getFloat(1), 0.0f); + assertEquals(random1, results.getFloat("test_float"), 0.0f); + assertEquals(random1, results.getObject("test_float", Float.class), 0.0f); + assertTrue(results.getObject(1) instanceof Float); + + assertEquals(0, results.getFloat(2), 0.0d); + assertTrue(results.wasNull()); + assertEquals(null, results.getObject("test_null_float")); + assertTrue(results.wasNull()); + + assertTrue(results.next()); + assertEquals(random2, results.getFloat(1), 0.0d); + assertEquals(random2, results.getFloat("test_float"), 0.0d); + assertTrue(results.getObject(1) instanceof Float); + assertEquals(random3, results.getFloat("test_keyword"), 0.0d); + + assertFalse(results.next()); + }); + } + + public void testGettingValidFloatWithCasting() throws Exception { + Map map = createTestDataForNumericValueTypes(() -> randomFloat()); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + for(Entry e : map.entrySet()) { + assertEquals("For field " + e.getKey(), e.getValue().floatValue(), results.getFloat(e.getKey()), 0.0f); + assertEquals("For field " + e.getKey(), + e.getValue().floatValue(), results.getObject(e.getKey(), Float.class), 0.0f); + } + }); + } + + public void testGettingInvalidFloat() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_keyword").field("type", "keyword").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + + String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); + long randomDate = randomLong(); + + index("test", "1", builder -> { + builder.field("test_keyword", randomString); + builder.field("test_date", randomDate); + }); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + + SQLException sqle = expectThrows(SQLException.class, () -> results.getFloat("test_keyword")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Float", randomString), + sqle.getMessage()); + sqle = expectThrows(SQLException.class, () -> results.getObject("test_keyword", Float.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [VARCHAR] to a Float", randomString), + sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getFloat("test_date")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Float", randomDate), + sqle.getMessage()); + sqle = 
expectThrows(SQLException.class, () -> results.getObject("test_date", Float.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Float", randomDate), + sqle.getMessage()); + }); + } + + public void testGettingBooleanValues() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_boolean").field("type", "boolean").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + long randomDate1 = randomLong(); + long randomDate2 = randomLong(); + + // true values + indexSimpleDocumentWithTrueValues(randomDate1); + + // false values + index("test", "2", builder -> { + builder.field("test_boolean", false); + builder.field("test_byte", 0); + builder.field("test_integer", 0); + builder.field("test_long", 0L); + builder.field("test_short", 0); + builder.field("test_double", 0d); + builder.field("test_float", 0f); + builder.field("test_keyword", "false"); + builder.field("test_date", randomDate2); + }); + + // other (non 0 = true) values + index("test", "3", builder -> { + builder.field("test_byte", randomValueOtherThan((byte) 0, () -> randomByte())); + builder.field("test_integer", randomValueOtherThan(0, () -> randomInt())); + builder.field("test_long", randomValueOtherThan(0L, () -> randomLong())); + builder.field("test_short", randomValueOtherThan((short) 0, () -> randomShort())); + builder.field("test_double", randomValueOtherThanMany(i -> i < 1.0d && i > -1.0d && i < Double.MAX_VALUE + && i > Double.MIN_VALUE, + () -> randomDouble() * randomInt())); + builder.field("test_float", randomValueOtherThanMany(i -> i < 1.0f && i > -1.0f && i < Float.MAX_VALUE && i > Float.MIN_VALUE, + () -> randomFloat() * randomInt())); + builder.field("test_keyword", "1"); + }); + + // other false values + index("test", "4", builder -> { + builder.field("test_keyword", "0"); + }); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + assertEquals(true, results.getBoolean("test_boolean")); + for(String fld : fieldsNames) { + assertEquals("Expected: but was: for field " + fld, true, results.getBoolean(fld)); + assertEquals("Expected: but was: for field " + fld, true, results.getObject(fld, Boolean.class)); + } + SQLException sqle = expectThrows(SQLException.class, () -> results.getBoolean("test_date")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Boolean", randomDate1), + sqle.getMessage()); + + results.next(); + assertEquals(false, results.getBoolean("test_boolean")); + for(String fld : fieldsNames) { + assertEquals("Expected: but was: for field " + fld, false, results.getBoolean(fld)); + assertEquals("Expected: but was: for field " + fld, false, results.getObject(fld, Boolean.class)); + } + sqle = expectThrows(SQLException.class, () -> results.getBoolean("test_date")); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Boolean", randomDate2), + sqle.getMessage()); + + sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Boolean.class)); + assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [TIMESTAMP] to a Boolean", randomDate2), + sqle.getMessage()); + + results.next(); + for(String fld : fieldsNames.stream() + .filter((f) -> !f.equals("test_keyword")).collect(Collectors.toCollection(HashSet::new))) { + assertEquals("Expected: but was: for field " + fld, true, results.getBoolean(fld)); + 
assertEquals("Expected: but was: for field " + fld, true, results.getObject(fld, Boolean.class)); + } + + results.next(); + assertEquals(false, results.getBoolean("test_keyword")); + assertEquals(false, results.getObject("test_keyword", Boolean.class)); + }); + } + + public void testGettingDateWithoutCalendar() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_boolean").field("type", "boolean").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + Long randomLongDate = randomLong(); + indexSimpleDocumentWithTrueValues(randomLongDate); + + String timeZoneId = randomKnownTimeZone(); + Calendar connCalendar = Calendar.getInstance(TimeZone.getTimeZone(timeZoneId), Locale.ROOT); + + doWithQueryAndTimezone(SELECT_ALL_FIELDS, timeZoneId, (results) -> { + results.next(); + connCalendar.setTimeInMillis(randomLongDate); + connCalendar.set(HOUR_OF_DAY, 0); + connCalendar.set(MINUTE, 0); + connCalendar.set(SECOND, 0); + connCalendar.set(MILLISECOND, 0); + + assertEquals(results.getDate("test_date"), new java.sql.Date(connCalendar.getTimeInMillis())); + assertEquals(results.getDate(9), new java.sql.Date(connCalendar.getTimeInMillis())); + assertEquals(results.getObject("test_date", java.sql.Date.class), + new java.sql.Date(randomLongDate - (randomLongDate % 86400000L))); + assertEquals(results.getObject(9, java.sql.Date.class), + new java.sql.Date(randomLongDate - (randomLongDate % 86400000L))); + + // bulk validation for all fields which are not of type date + validateErrorsForDateTimeTestsWithoutCalendar(results::getDate); + }); + } + + public void testGettingDateWithCalendar() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_boolean").field("type", "boolean").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + Long randomLongDate = randomLong(); + indexSimpleDocumentWithTrueValues(randomLongDate); + index("test", "2", builder -> { + builder.timeField("test_date", null); + }); + + String timeZoneId = randomKnownTimeZone(); + String anotherTZId = randomValueOtherThan(timeZoneId, () -> randomKnownTimeZone()); + Calendar c = Calendar.getInstance(TimeZone.getTimeZone(anotherTZId), Locale.ROOT); + + doWithQueryAndTimezone(SELECT_ALL_FIELDS, timeZoneId, (results) -> { + results.next(); + c.setTimeInMillis(randomLongDate); + c.set(HOUR_OF_DAY, 0); + c.set(MINUTE, 0); + c.set(SECOND, 0); + c.set(MILLISECOND, 0); + + assertEquals(results.getDate("test_date", c), new java.sql.Date(c.getTimeInMillis())); + assertEquals(results.getDate(9, c), new java.sql.Date(c.getTimeInMillis())); + + // bulk validation for all fields which are not of type date + validateErrorsForDateTimeTestsWithCalendar(c, results::getDate); + + results.next(); + assertNull(results.getDate("test_date")); + }); + } + + public void testGettingTimeWithoutCalendar() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_boolean").field("type", "boolean").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + Long randomLongDate = randomLong(); + indexSimpleDocumentWithTrueValues(randomLongDate); + + String timeZoneId = randomKnownTimeZone(); + Calendar c = Calendar.getInstance(TimeZone.getTimeZone(timeZoneId), Locale.ROOT); + + 
doWithQueryAndTimezone(SELECT_ALL_FIELDS, timeZoneId, (results) -> { + results.next(); + c.setTimeInMillis(randomLongDate); + c.set(ERA, GregorianCalendar.AD); + c.set(YEAR, 1970); + c.set(MONTH, 0); + c.set(DAY_OF_MONTH, 1); + + assertEquals(results.getTime("test_date"), new java.sql.Time(c.getTimeInMillis())); + assertEquals(results.getTime(9), new java.sql.Time(c.getTimeInMillis())); + assertEquals(results.getObject("test_date", java.sql.Time.class), + new java.sql.Time(randomLongDate % 86400000L)); + assertEquals(results.getObject(9, java.sql.Time.class), + new java.sql.Time(randomLongDate % 86400000L)); + + validateErrorsForDateTimeTestsWithoutCalendar(results::getTime); + }); + } + + public void testGettingTimeWithCalendar() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_boolean").field("type", "boolean").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + Long randomLongDate = randomLong(); + indexSimpleDocumentWithTrueValues(randomLongDate); + index("test", "2", builder -> { + builder.timeField("test_date", null); + }); + + String timeZoneId = randomKnownTimeZone(); + String anotherTZId = randomValueOtherThan(timeZoneId, () -> randomKnownTimeZone()); + Calendar c = Calendar.getInstance(TimeZone.getTimeZone(anotherTZId), Locale.ROOT); + + doWithQueryAndTimezone(SELECT_ALL_FIELDS, timeZoneId, (results) -> { + results.next(); + c.setTimeInMillis(randomLongDate); + c.set(ERA, GregorianCalendar.AD); + c.set(YEAR, 1970); + c.set(MONTH, 0); + c.set(DAY_OF_MONTH, 1); + + assertEquals(results.getTime("test_date", c), new java.sql.Time(c.getTimeInMillis())); + assertEquals(results.getTime(9, c), new java.sql.Time(c.getTimeInMillis())); + + validateErrorsForDateTimeTestsWithCalendar(c, results::getTime); + + results.next(); + assertNull(results.getTime("test_date")); + }); + } + + public void testGettingTimestampWithoutCalendar() throws Exception { + createIndex("library"); + updateMapping("library", builder -> { + builder.startObject("release_date").field("type", "date").endObject(); + builder.startObject("republish_date").field("type", "date").endObject(); + }); + long randomMillis = randomLong(); index("library", "1", builder -> { builder.field("name", "Don Quixote"); builder.field("page_count", 1072); - builder.timeField("release_date", new Date(randomMillis)); + builder.field("release_date", randomMillis); builder.timeField("republish_date", null); }); index("library", "2", builder -> { builder.field("name", "1984"); builder.field("page_count", 328); - builder.timeField("release_date", new Date(-649036800000L)); - builder.timeField("republish_date", new Date(599616000000L)); + builder.field("release_date", -649036800000L); + builder.field("republish_date", 599616000000L); }); - try (Connection connection = esJdbc()) { - try (PreparedStatement statement = connection.prepareStatement("SELECT name, release_date, republish_date FROM library")) { - try (ResultSet results = statement.executeQuery()) { - ResultSetMetaData resultSetMetaData = results.getMetaData(); - - results.next(); - assertEquals(3, resultSetMetaData.getColumnCount()); - assertEquals(randomMillis, results.getTimestamp("release_date").getTime()); - assertEquals(randomMillis, results.getTimestamp(2).getTime()); - assertTrue(results.getObject(2) instanceof Timestamp); - assertEquals(randomMillis, ((Timestamp) results.getObject("release_date")).getTime()); - - 
assertNull(results.getTimestamp(3)); - assertNull(results.getObject("republish_date")); - - assertTrue(results.next()); - assertEquals(599616000000L, results.getTimestamp("republish_date").getTime()); - assertEquals(-649036800000L, ((Timestamp) results.getObject(2)).getTime()); - - assertFalse(results.next()); - } - } - } + doWithQuery("SELECT name, release_date, republish_date FROM library", (results) -> { + ResultSetMetaData resultSetMetaData = results.getMetaData(); + + results.next(); + assertEquals(3, resultSetMetaData.getColumnCount()); + assertEquals(randomMillis, results.getTimestamp("release_date").getTime()); + assertEquals(randomMillis, results.getTimestamp(2).getTime()); + assertTrue(results.getObject(2) instanceof Timestamp); + assertEquals(randomMillis, ((Timestamp) results.getObject("release_date")).getTime()); + + assertNull(results.getTimestamp(3)); + assertNull(results.getObject("republish_date")); + + assertTrue(results.next()); + assertEquals(599616000000L, results.getTimestamp("republish_date").getTime()); + assertEquals(-649036800000L, ((Timestamp) results.getObject(2)).getTime()); + + assertFalse(results.next()); + }); + } + + public void testGettingTimestampWithCalendar() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_boolean").field("type", "boolean").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + Long randomLongDate = randomLong(); + indexSimpleDocumentWithTrueValues(randomLongDate); + index("test", "2", builder -> { + builder.timeField("test_date", null); + }); + + String timeZoneId = randomKnownTimeZone(); + String anotherTZId = randomValueOtherThan(timeZoneId, () -> randomKnownTimeZone()); + Calendar c = Calendar.getInstance(TimeZone.getTimeZone(anotherTZId), Locale.ROOT); + + doWithQueryAndTimezone(SELECT_ALL_FIELDS, timeZoneId, (results) -> { + results.next(); + c.setTimeInMillis(randomLongDate); + + assertEquals(results.getTimestamp("test_date", c), new java.sql.Timestamp(c.getTimeInMillis())); + assertEquals(results.getTimestamp(9, c), new java.sql.Timestamp(c.getTimeInMillis())); + + validateErrorsForDateTimeTestsWithCalendar(c, results::getTimestamp); + + results.next(); + assertNull(results.getTimestamp("test_date")); + }); + } + + public void testValidGetObjectCalls() throws Exception { + createIndex("test"); + updateMappingForNumericValuesTests("test"); + updateMapping("test", builder -> { + builder.startObject("test_boolean").field("type", "boolean").endObject(); + builder.startObject("test_date").field("type", "date").endObject(); + }); + + byte b = randomByte(); + int i = randomInt(); + long l = randomLong(); + short s = (short) randomIntBetween(Short.MIN_VALUE, Short.MAX_VALUE); + double d = randomDouble(); + float f = randomFloat(); + boolean randomBool = randomBoolean(); + Long randomLongDate = randomLong(); + String randomString = randomUnicodeOfCodepointLengthBetween(128, 256); + + index("test", "1", builder -> { + builder.field("test_byte", b); + builder.field("test_integer", i); + builder.field("test_long", l); + builder.field("test_short", s); + builder.field("test_double", d); + builder.field("test_float", f); + builder.field("test_keyword", randomString); + builder.field("test_date", randomLongDate); + builder.field("test_boolean", randomBool); + }); + + doWithQuery(SELECT_WILDCARD, (results) -> { + results.next(); + + assertEquals(b, results.getObject("test_byte")); + 
assertTrue(results.getObject("test_byte") instanceof Byte); + + assertEquals(i, results.getObject("test_integer")); + assertTrue(results.getObject("test_integer") instanceof Integer); + + assertEquals(l, results.getObject("test_long")); + assertTrue(results.getObject("test_long") instanceof Long); + + assertEquals(s, results.getObject("test_short")); + assertTrue(results.getObject("test_short") instanceof Short); + + assertEquals(d, results.getObject("test_double")); + assertTrue(results.getObject("test_double") instanceof Double); + + assertEquals(f, results.getObject("test_float")); + assertTrue(results.getObject("test_float") instanceof Float); + + assertEquals(randomString, results.getObject("test_keyword")); + assertTrue(results.getObject("test_keyword") instanceof String); + + assertEquals(new Date(randomLongDate), results.getObject("test_date")); + assertTrue(results.getObject("test_date") instanceof Timestamp); + + assertEquals(randomBool, results.getObject("test_boolean")); + assertTrue(results.getObject("test_boolean") instanceof Boolean); + }); } /* @@ -79,4 +1091,458 @@ public void testNoInfiniteRecursiveGetObjectCalls() throws SQLException, IOExcep fail("Infinite recursive call on getObject() method"); } } + + public void testUnsupportedGetMethods() throws IOException, SQLException { + index("test", "1", builder -> { + builder.field("test", "test"); + }); + Connection conn = esJdbc(); + PreparedStatement statement = conn.prepareStatement("SELECT * FROM test"); + ResultSet r = statement.executeQuery(); + + r.next(); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getAsciiStream("test"), "AsciiStream not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getAsciiStream(1), "AsciiStream not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getArray("test"), "Array not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getArray(1), "Array not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getBigDecimal("test"), "BigDecimal not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getBigDecimal("test"), "BigDecimal not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getBinaryStream("test"), "BinaryStream not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getBinaryStream(1), "BinaryStream not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getBlob("test"), "Blob not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getBlob(1), "Blob not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getCharacterStream("test"), "CharacterStream not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getCharacterStream(1), "CharacterStream not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getClob("test"), "Clob not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getClob(1), "Clob not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getNCharacterStream("test"), "NCharacterStream not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getNCharacterStream(1), "NCharacterStream not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getNClob("test"), "NClob not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getNClob(1), "NClob not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getNString("test"), "NString not supported"); + 
assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getNString(1), "NString not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getRef("test"), "Ref not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getRef(1), "Ref not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getRowId("test"), "RowId not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getRowId(1), "RowId not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getSQLXML("test"), "SQLXML not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getSQLXML(1), "SQLXML not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getURL("test"), "URL not supported"); + assertThrowsUnsupportedAndExpectErrorMessage(() -> r.getURL(1), "URL not supported"); + } + + public void testUnsupportedUpdateMethods() throws IOException, SQLException { + index("test", "1", builder -> { + builder.field("test", "test"); + }); + Connection conn = esJdbc(); + PreparedStatement statement = conn.prepareStatement("SELECT * FROM test"); + ResultSet r = statement.executeQuery(); + + r.next(); + Blob b = null; + InputStream i = null; + Clob c = null; + NClob nc = null; + Reader rd = null; + + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBytes(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBytes("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateArray(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateArray("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateAsciiStream(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateAsciiStream("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateAsciiStream(1, null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateAsciiStream(1, null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateAsciiStream("", null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateAsciiStream("", null, 1L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBigDecimal(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBigDecimal("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBinaryStream(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBinaryStream("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBinaryStream(1, null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBinaryStream(1, null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBinaryStream("", null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBinaryStream("", null, 1L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBlob(1, b)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBlob(1, i)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBlob("", b)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBlob("", i)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBlob(1, null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBlob("", null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBoolean(1, false)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateBoolean("", false)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateByte(1, (byte) 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateByte("", (byte) 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateCharacterStream(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> 
r.updateCharacterStream("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateCharacterStream(1, null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateCharacterStream(1, null, 1L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateCharacterStream("", null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateCharacterStream("", null, 1L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateClob(1, c)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateClob(1, rd)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateClob("", c)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateClob("", rd)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateClob(1, null, 1L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateClob("", null, 1L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateDate(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateDate("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateDouble(1, 0d)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateDouble("", 0d)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateFloat(1, 0f)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateFloat("", 0f)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateInt(1, 0)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateInt("", 0)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateLong(1, 0L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateLong("", 0L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNCharacterStream(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNCharacterStream("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNCharacterStream(1, null, 1L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNCharacterStream("", null, 1L)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNClob(1, nc)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNClob(1, rd)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNClob("", nc)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNClob("", rd)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNClob(1, null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNClob("", null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNString(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNString("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNull(1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateNull("")); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateObject(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateObject("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateObject(1, null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateObject("", null, 1)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateRef(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateRef("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateRow()); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateRowId(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateRowId("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateSQLXML(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateSQLXML("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateShort(1, (short) 0)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateShort("", (short) 0)); + 
assertThrowsWritesUnsupportedForUpdate(() -> r.updateString(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateString("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateTime(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateTime("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateTimestamp(1, null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateTimestamp("", null)); + assertThrowsWritesUnsupportedForUpdate(() -> r.insertRow()); + assertThrowsWritesUnsupportedForUpdate(() -> r.updateRow()); + assertThrowsWritesUnsupportedForUpdate(() -> r.deleteRow()); + assertThrowsWritesUnsupportedForUpdate(() -> r.cancelRowUpdates()); + assertThrowsWritesUnsupportedForUpdate(() -> r.moveToInsertRow()); + assertThrowsWritesUnsupportedForUpdate(() -> r.refreshRow()); + assertThrowsWritesUnsupportedForUpdate(() -> r.moveToCurrentRow()); + assertThrowsWritesUnsupportedForUpdate(() -> r.rowUpdated()); + assertThrowsWritesUnsupportedForUpdate(() -> r.rowInserted()); + assertThrowsWritesUnsupportedForUpdate(() -> r.rowDeleted()); + } + + private void doWithQuery(String query, CheckedConsumer consumer) throws SQLException { + try (Connection connection = esJdbc()) { + try (PreparedStatement statement = connection.prepareStatement(query)) { + try (ResultSet results = statement.executeQuery()) { + consumer.accept(results); + } + } + } + } + + private void doWithQueryAndTimezone(String query, String tz, CheckedConsumer consumer) throws SQLException { + try (Connection connection = esJdbc(tz)) { + try (PreparedStatement statement = connection.prepareStatement(query)) { + try (ResultSet results = statement.executeQuery()) { + consumer.accept(results); + } + } + } + } + + private void createIndex(String index) throws Exception { + Request request = new Request("PUT", "/" + index); + XContentBuilder createIndex = JsonXContent.contentBuilder().startObject(); + createIndex.startObject("settings"); + { + createIndex.field("number_of_shards", 1); + createIndex.field("number_of_replicas", 1); + } + createIndex.endObject(); + createIndex.startObject("mappings"); + { + createIndex.startObject("doc"); + { + createIndex.startObject("properties"); + {} + createIndex.endObject(); + } + createIndex.endObject(); + } + createIndex.endObject().endObject(); + request.setJsonEntity(Strings.toString(createIndex)); + client().performRequest(request); + } + + private void updateMapping(String index, CheckedConsumer body) throws Exception { + Request request = new Request("PUT", "/" + index + "/_mapping/doc"); + XContentBuilder updateMapping = JsonXContent.contentBuilder().startObject(); + updateMapping.startObject("properties"); + { + body.accept(updateMapping); + } + updateMapping.endObject().endObject(); + + request.setJsonEntity(Strings.toString(updateMapping)); + client().performRequest(request); + } + + private void createTestDataForByteValueTests(byte random1, byte random2, byte random3) throws Exception, IOException { + createIndex("test"); + updateMapping("test", builder -> { + builder.startObject("test_byte").field("type", "byte").endObject(); + builder.startObject("test_null_byte").field("type", "byte").endObject(); + builder.startObject("test_keyword").field("type", "keyword").endObject(); + }); + + index("test", "1", builder -> { + builder.field("test_byte", random1); + builder.field("test_null_byte", (Byte) null); + }); + index("test", "2", builder -> { + builder.field("test_byte", random2); + builder.field("test_keyword", random3); + }); + } + + private 
void createTestDataForShortValueTests(short random1, short random2, short random3) throws Exception, IOException { + createIndex("test"); + updateMapping("test", builder -> { + builder.startObject("test_short").field("type", "short").endObject(); + builder.startObject("test_null_short").field("type", "short").endObject(); + builder.startObject("test_keyword").field("type", "keyword").endObject(); + }); + + index("test", "1", builder -> { + builder.field("test_short", random1); + builder.field("test_null_short", (Short) null); + }); + index("test", "2", builder -> { + builder.field("test_short", random2); + builder.field("test_keyword", random3); + }); + } + + private void createTestDataForIntegerValueTests(int random1, int random2, int random3) throws Exception, IOException { + createIndex("test"); + updateMapping("test", builder -> { + builder.startObject("test_integer").field("type", "integer").endObject(); + builder.startObject("test_null_integer").field("type", "integer").endObject(); + builder.startObject("test_keyword").field("type", "keyword").endObject(); + }); + + index("test", "1", builder -> { + builder.field("test_integer", random1); + builder.field("test_null_integer", (Integer) null); + }); + index("test", "2", builder -> { + builder.field("test_integer", random2); + builder.field("test_keyword", random3); + }); + } + + private void createTestDataForLongValueTests(long random1, long random2, long random3) throws Exception, IOException { + createIndex("test"); + updateMapping("test", builder -> { + builder.startObject("test_long").field("type", "long").endObject(); + builder.startObject("test_null_long").field("type", "long").endObject(); + builder.startObject("test_keyword").field("type", "keyword").endObject(); + }); + + index("test", "1", builder -> { + builder.field("test_long", random1); + builder.field("test_null_long", (Long) null); + }); + index("test", "2", builder -> { + builder.field("test_long", random2); + builder.field("test_keyword", random3); + }); + } + + private void createTestDataForDoubleValueTests(double random1, double random2, double random3) throws Exception, IOException { + createIndex("test"); + updateMapping("test", builder -> { + builder.startObject("test_double").field("type", "double").endObject(); + builder.startObject("test_null_double").field("type", "double").endObject(); + builder.startObject("test_keyword").field("type", "keyword").endObject(); + }); + + index("test", "1", builder -> { + builder.field("test_double", random1); + builder.field("test_null_double", (Double) null); + }); + index("test", "2", builder -> { + builder.field("test_double", random2); + builder.field("test_keyword", random3); + }); + } + + private void createTestDataForFloatValueTests(float random1, float random2, float random3) throws Exception, IOException { + createIndex("test"); + updateMapping("test", builder -> { + builder.startObject("test_float").field("type", "float").endObject(); + builder.startObject("test_null_float").field("type", "float").endObject(); + builder.startObject("test_keyword").field("type", "keyword").endObject(); + }); + + index("test", "1", builder -> { + builder.field("test_float", random1); + builder.field("test_null_float", (Double) null); + }); + index("test", "2", builder -> { + builder.field("test_float", random2); + builder.field("test_keyword", random3); + }); + } + + private void indexSimpleDocumentWithTrueValues(Long randomLongDate) throws IOException { + index("test", "1", builder -> { + builder.field("test_boolean", true); + 
builder.field("test_byte", 1); + builder.field("test_integer", 1); + builder.field("test_long", 1L); + builder.field("test_short", 1); + builder.field("test_double", 1d); + builder.field("test_float", 1f); + builder.field("test_keyword", "true"); + builder.field("test_date", randomLongDate); + }); + } + + /** + * Creates test data for all numeric get* methods. All values random and different from the other numeric fields already generated. + * It returns a map containing the field name and its randomly generated value to be later used in checking the returned values. + */ + private Map createTestDataForNumericValueTypes(Supplier randomGenerator) throws Exception, IOException { + Map map = new HashMap(); + createIndex("test"); + updateMappingForNumericValuesTests("test"); + + index("test", "1", builder -> { + // random Byte + byte test_byte = randomValueOtherThanMany(map::containsValue, randomGenerator).byteValue(); + builder.field("test_byte", test_byte); + map.put("test_byte", test_byte); + + // random Integer + int test_integer = randomValueOtherThanMany(map::containsValue, randomGenerator).intValue(); + builder.field("test_integer", test_integer); + map.put("test_integer", test_integer); + + // random Short + int test_short = randomValueOtherThanMany(map::containsValue, randomGenerator).shortValue(); + builder.field("test_short", test_short); + map.put("test_short", test_short); + + // random Long + long test_long = randomValueOtherThanMany(map::containsValue, randomGenerator).longValue(); + builder.field("test_long", test_long); + map.put("test_long", test_long); + + // random Double + double test_double = randomValueOtherThanMany(map::containsValue, randomGenerator).doubleValue(); + builder.field("test_double", test_double); + map.put("test_double", test_double); + + // random Float + float test_float = randomValueOtherThanMany(map::containsValue, randomGenerator).floatValue(); + builder.field("test_float", test_float); + map.put("test_float", test_float); + }); + return map; + } + + private void updateMappingForNumericValuesTests(String indexName) throws Exception { + updateMapping(indexName, builder -> { + for(String field : fieldsNames) { + builder.startObject(field).field("type", field.substring(5)).endObject(); + } + }); + } + + private void assertThrowsUnsupportedAndExpectErrorMessage(ThrowingRunnable runnable, String message) { + SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, runnable); + assertEquals(message, sqle.getMessage()); + } + + private void assertThrowsWritesUnsupportedForUpdate(ThrowingRunnable r) { + assertThrowsUnsupportedAndExpectErrorMessage(r, "Writes not supported"); + } + + private void validateErrorsForDateTimeTestsWithoutCalendar(CheckedFunction method) { + SQLException sqle; + for(Entry,JDBCType> field : dateTimeTestingFields.entrySet()) { + sqle = expectThrows(SQLException.class, () -> method.apply(field.getKey().v1())); + assertEquals( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Long", + field.getKey().v2(), field.getValue()), sqle.getMessage()); + } + } + + private void validateErrorsForDateTimeTestsWithCalendar(Calendar c, CheckedBiFunction method) { + SQLException sqle; + for(Entry,JDBCType> field : dateTimeTestingFields.entrySet()) { + sqle = expectThrows(SQLException.class, () -> method.apply(field.getKey().v1(), c)); + assertEquals( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Long", + field.getKey().v2(), field.getValue()), sqle.getMessage()); + } + } + + private 
float randomFloatBetween(float start, float end) { + float result = 0.0f; + while (result < start || result > end || Float.isNaN(result)) { + result = start + randomFloat() * (end - start); + } + + return result; + } + + private Long getMaxIntPlusOne() { + return Long.valueOf(Integer.MAX_VALUE) + 1L; + } + + private Double getMaxLongPlusOne() { + return Double.valueOf(Long.MAX_VALUE) + 1d; + } + + private Connection esJdbc(String timeZoneId) throws SQLException { + return randomBoolean() ? useDriverManager(timeZoneId) : useDataSource(timeZoneId); + } + + private Connection useDriverManager(String timeZoneId) throws SQLException { + String elasticsearchAddress = getProtocol() + "://" + elasticsearchAddress(); + String address = "jdbc:es://" + elasticsearchAddress; + Properties connectionProperties = connectionProperties(); + connectionProperties.put(JdbcConfiguration.TIME_ZONE, timeZoneId); + Connection connection = DriverManager.getConnection(address, connectionProperties); + + assertNotNull("The timezone should be specified", connectionProperties.getProperty(JdbcConfiguration.TIME_ZONE)); + return connection; + } + + private Connection useDataSource(String timeZoneId) throws SQLException { + String elasticsearchAddress = getProtocol() + "://" + elasticsearchAddress(); + JdbcDataSource dataSource = new JdbcDataSource(); + String address = "jdbc:es://" + elasticsearchAddress; + dataSource.setUrl(address); + Properties connectionProperties = connectionProperties(); + connectionProperties.put(JdbcConfiguration.TIME_ZONE, timeZoneId); + dataSource.setProperties(connectionProperties); + Connection connection = dataSource.getConnection(); + + assertNotNull("The timezone should be specified", connectionProperties.getProperty(JdbcConfiguration.TIME_ZONE)); + return connection; + } } diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SimpleExampleTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SimpleExampleTestCase.java index 7621743481a4f..f5d559d9bf0b3 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SimpleExampleTestCase.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SimpleExampleTestCase.java @@ -25,7 +25,8 @@ public void testSimpleExample() throws Exception { assertEquals("Don Quixote", results.getString(1)); assertEquals(1072, results.getInt(2)); SQLException e = expectThrows(SQLException.class, () -> results.getInt(1)); - assertTrue(e.getMessage(), e.getMessage().contains("unable to convert column 1 to an int")); + assertTrue(e.getMessage(), + e.getMessage().contains("Unable to convert value [Don Quixote] of type [VARCHAR] to an Integer")); assertFalse(results.next()); } // end::simple_example From f49f19d851dc057d542486421af143c9bebbd7d0 Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Fri, 31 Aug 2018 16:36:57 +0300 Subject: [PATCH 36/52] Lazy evaluate java9home (#33301) --- .../groovy/org/elasticsearch/gradle/BuildPlugin.groovy | 2 +- .../gradle/precommit/ForbiddenApisCliTask.java | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 6d36f606ad35a..3f8e5a13a4d40 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -211,7 +211,7 @@ class BuildPlugin implements Plugin { 
project.rootProject.ext.minimumRuntimeVersion = minimumRuntimeVersion project.rootProject.ext.inFipsJvm = inFipsJvm project.rootProject.ext.gradleJavaVersion = JavaVersion.toVersion(gradleJavaVersion) - project.rootProject.ext.java9Home = findJavaHome("9") + project.rootProject.ext.java9Home = "${-> findJavaHome("9")}" } project.targetCompatibility = project.rootProject.ext.minimumRuntimeVersion diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ForbiddenApisCliTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ForbiddenApisCliTask.java index 46e5d84a2f282..aaa9564b0dc0b 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ForbiddenApisCliTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ForbiddenApisCliTask.java @@ -51,7 +51,8 @@ public class ForbiddenApisCliTask extends DefaultTask { private JavaVersion targetCompatibility; private FileCollection classesDirs; private SourceSet sourceSet; - private String javaHome; + // This needs to be an object so it can hold Groovy GStrings + private Object javaHome; @Input public JavaVersion getTargetCompatibility() { @@ -142,11 +143,11 @@ public Configuration getForbiddenAPIsConfiguration() { } @Input - public String getJavaHome() { + public Object getJavaHome() { return javaHome; } - public void setJavaHome(String javaHome) { + public void setJavaHome(Object javaHome) { this.javaHome = javaHome; } From a9216c418c5654e0346d29e7041bcc3724d7ac8a Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Fri, 31 Aug 2018 16:48:00 +0300 Subject: [PATCH 37/52] Fix pom for build-tools (#33300) Looks like `java-gradle-plugin` reconfigures the pom. Stop using it since we don't publish to Gradle plugin portal. --- buildSrc/build.gradle | 9 --------- .../elasticsearch.clusterformation.properties | 1 + 2 files changed, 1 insertion(+), 9 deletions(-) create mode 100644 buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.clusterformation.properties diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index dce14b10fcb8c..da8ad788164d2 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -24,15 +24,6 @@ plugins { id 'groovy' } -gradlePlugin { - plugins { - simplePlugin { - id = 'elasticsearch.clusterformation' - implementationClass = 'org.elasticsearch.gradle.clusterformation.ClusterformationPlugin' - } - } -} - group = 'org.elasticsearch.gradle' String minimumGradleVersion = file('src/main/resources/minimumGradleVersion').text.trim() diff --git a/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.clusterformation.properties b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.clusterformation.properties new file mode 100644 index 0000000000000..dfd6cd9956a58 --- /dev/null +++ b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.clusterformation.properties @@ -0,0 +1 @@ +implementation-class=org.elasticsearch.gradle.clusterformation.ClusterformationPlugin From c168c037bf6890cf82809c5963fc6fbbb62a5a9a Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 31 Aug 2018 18:10:43 +0200 Subject: [PATCH 38/52] MINOR: Remove Dead Code from PathTrie (#33280) (#33306) * The array size checks are redundant since the array sizes are checked earlier in those methods too * The removed methods are just not used anywhere --- .../elasticsearch/common/path/PathTrie.java | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/path/PathTrie.java 
b/server/src/main/java/org/elasticsearch/common/path/PathTrie.java index 5243809c64a1b..08787cea9df73 100644 --- a/server/src/main/java/org/elasticsearch/common/path/PathTrie.java +++ b/server/src/main/java/org/elasticsearch/common/path/PathTrie.java @@ -104,24 +104,12 @@ public void updateKeyWithNamedWildcard(String key) { namedWildcard = key.substring(key.indexOf('{') + 1, key.indexOf('}')); } - public boolean isWildcard() { - return isWildcard; - } - - public synchronized void addChild(TrieNode child) { - addInnerChild(child.key, child); - } - private void addInnerChild(String key, TrieNode child) { Map newChildren = new HashMap<>(children); newChildren.put(key, child); children = unmodifiableMap(newChildren); } - public TrieNode getChild(String key) { - return children.get(key); - } - public synchronized void insert(String[] path, int index, T value) { if (index >= path.length) return; @@ -302,7 +290,7 @@ public void insert(String path, T value) { } int index = 0; // Supports initial delimiter. - if (strings.length > 0 && strings[0].isEmpty()) { + if (strings[0].isEmpty()) { index = 1; } root.insert(strings, index, value); @@ -327,7 +315,7 @@ public void insertOrUpdate(String path, T value, BiFunction updater) { } int index = 0; // Supports initial delimiter. - if (strings.length > 0 && strings[0].isEmpty()) { + if (strings[0].isEmpty()) { index = 1; } root.insertOrUpdate(strings, index, value, updater); @@ -352,7 +340,7 @@ public T retrieve(String path, Map params, TrieMatchingMode trie int index = 0; // Supports initial delimiter. - if (strings.length > 0 && strings[0].isEmpty()) { + if (strings[0].isEmpty()) { index = 1; } From 908c010301d5d45e3019e9f6af5406ddcb105401 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 31 Aug 2018 18:11:16 +0200 Subject: [PATCH 39/52] TESTS: Fix Random Fail in MockTcpTransportTests (#33061) (#33307) * `foobar.txGet()` appears to return before `serviceB.stop()` returns, causing `ServiceB.close()` to run concurrently with the `stop` call and running into a race codition * Closes #32863 --- .../transport/AbstractSimpleTransportTestCase.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 4f16b173c38bb..8c654ab8883f6 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -764,6 +764,7 @@ public void onAfter() { public void testNotifyOnShutdown() throws Exception { final CountDownLatch latch2 = new CountDownLatch(1); + final CountDownLatch latch3 = new CountDownLatch(1); try { serviceA.registerRequestHandler("internal:foobar", StringMessageRequest::new, ThreadPool.Names.GENERIC, (request, channel) -> { @@ -773,6 +774,8 @@ public void testNotifyOnShutdown() throws Exception { serviceB.stop(); } catch (Exception e) { fail(e.getMessage()); + } finally { + latch3.countDown(); } }); TransportFuture foobar = serviceB.submitRequest(nodeA, "internal:foobar", @@ -784,6 +787,7 @@ public void testNotifyOnShutdown() throws Exception { } catch (TransportException ex) { } + latch3.await(); } finally { serviceB.close(); // make sure we are fully closed here otherwise we might run into assertions down the road serviceA.disconnectFromNode(nodeB); From ff56588453defdc913161e8c870f7e0bce3ba111 Mon Sep 17 00:00:00 2001 
From: Colin Goodheart-Smithe Date: Fri, 31 Aug 2018 17:47:05 +0100 Subject: [PATCH 40/52] Fixes SecurityIntegTestCase so it always adds at least one alias (#33296) * Fixes SecurityIntegTestCase so it always adds at least one alias `SecurityIntegTestCase.createIndicesWithRandomAliases` could randomly fail because its not gauranteed that the randomness of which aliases to add to the `IndicesAliasesRequestBuilder` would always select at least one alias to add. This change fixes the problem by keeping track of whether we have added an alias to teh request and forcing the last alias to be added if no other aliases have been added so far. Closes #30098 Closes #33123e * Addresses review comments --- .../java/org/elasticsearch/test/SecurityIntegTestCase.java | 7 ++++++- .../xpack/security/authz/ReadActionsTests.java | 1 - 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java index cda7715521ada..b7802ac0e205e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java @@ -7,6 +7,7 @@ import io.netty.util.ThreadDeathWatcher; import io.netty.util.concurrent.GlobalEventExecutor; + import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; @@ -427,14 +428,18 @@ protected void createIndicesWithRandomAliases(String... indices) { createIndex(indices); if (frequently()) { + boolean aliasAdded = false; IndicesAliasesRequestBuilder builder = client().admin().indices().prepareAliases(); for (String index : indices) { if (frequently()) { //one alias per index with prefix "alias-" builder.addAlias(index, "alias-" + index); + aliasAdded = true; } } - if (randomBoolean()) { + // If we get to this point and we haven't added an alias to the request we need to add one + // or the request will fail so use noAliasAdded to force adding the alias in this case + if (aliasAdded == false || randomBoolean()) { //one alias pointing to all indices for (String index : indices) { builder.addAlias(index, "alias"); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/ReadActionsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/ReadActionsTests.java index a88dafece3251..76568d3d48b5a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/ReadActionsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/ReadActionsTests.java @@ -102,7 +102,6 @@ public void testEmptyAuthorizedIndicesSearchForAll() { assertNoSearchHits(client().prepareSearch().get()); } - @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/33123") public void testEmptyAuthorizedIndicesSearchForAllDisallowNoIndices() { createIndicesWithRandomAliases("index1", "index2"); IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> client().prepareSearch() From fab5644f12490f601b4aa2cb17ea40aa7847e6dc Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Fri, 31 Aug 2018 10:11:58 -0700 Subject: [PATCH 41/52] [DOCS] Rename X-Pack Commands section (#33005) --- docs/reference/commands/index.asciidoc | 8 ++++---- docs/reference/redirects.asciidoc | 5 +++++ 2 files 
changed, 9 insertions(+), 4 deletions(-) diff --git a/docs/reference/commands/index.asciidoc b/docs/reference/commands/index.asciidoc index 164d2fc0e84f0..134ac1edbd017 100644 --- a/docs/reference/commands/index.asciidoc +++ b/docs/reference/commands/index.asciidoc @@ -1,11 +1,11 @@ -[role="xpack"] -[[xpack-commands]] -= {xpack} Commands +[[commands]] += Command line tools [partintro] -- -{xpack} includes commands that help you configure security: +{es} provides the following tools for configuring security and performing other +tasks from the command line: * <> * <> diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 6f9f3b862efc5..db26bc84a4cc4 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -539,3 +539,8 @@ You can use the following APIs to add, remove, and retrieve role mappings: === Privilege APIs See <>. + +[role="exclude",id="xpack-commands"] +=== X-Pack commands + +See <>. From 79cc55ddbfb408c179b7a9403a3058ffbf967474 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Fri, 31 Aug 2018 10:50:43 -0700 Subject: [PATCH 42/52] [DOCS] Move rollup APIs to docs (#31450) --- docs/build.gradle | 267 ++++++++++++++++++ docs/reference/index.asciidoc | 2 +- docs/reference/rest-api/index.asciidoc | 2 +- .../reference}/rollup/api-quickref.asciidoc | 2 + .../rollup/apis}/delete-job.asciidoc | 1 + .../reference/rollup/apis}/get-job.asciidoc | 1 + .../reference/rollup/apis}/put-job.asciidoc | 1 + .../rollup/apis}/rollup-caps.asciidoc | 1 + .../rollup/apis}/rollup-index-caps.asciidoc | 0 .../rollup/apis}/rollup-job-config.asciidoc | 1 + .../rollup/apis}/rollup-search.asciidoc | 1 + .../reference/rollup/apis}/start-job.asciidoc | 1 + .../reference/rollup/apis}/stop-job.asciidoc | 1 + .../reference}/rollup/index.asciidoc | 2 + .../reference}/rollup/overview.asciidoc | 2 + .../rollup/rollup-agg-limitations.asciidoc | 2 + .../reference/rollup}/rollup-api.asciidoc | 19 +- .../rollup/rollup-getting-started.asciidoc | 2 + .../rollup/rollup-search-limitations.asciidoc | 2 + .../rollup/understanding-groups.asciidoc | 2 + 20 files changed, 301 insertions(+), 11 deletions(-) rename {x-pack/docs/en => docs/reference}/rollup/api-quickref.asciidoc (97%) rename {x-pack/docs/en/rest-api/rollup => docs/reference/rollup/apis}/delete-job.asciidoc (99%) rename {x-pack/docs/en/rest-api/rollup => docs/reference/rollup/apis}/get-job.asciidoc (99%) rename {x-pack/docs/en/rest-api/rollup => docs/reference/rollup/apis}/put-job.asciidoc (99%) rename {x-pack/docs/en/rest-api/rollup => docs/reference/rollup/apis}/rollup-caps.asciidoc (99%) rename {x-pack/docs/en/rest-api/rollup => docs/reference/rollup/apis}/rollup-index-caps.asciidoc (100%) rename {x-pack/docs/en/rest-api/rollup => docs/reference/rollup/apis}/rollup-job-config.asciidoc (99%) rename {x-pack/docs/en/rest-api/rollup => docs/reference/rollup/apis}/rollup-search.asciidoc (99%) rename {x-pack/docs/en/rest-api/rollup => docs/reference/rollup/apis}/start-job.asciidoc (99%) rename {x-pack/docs/en/rest-api/rollup => docs/reference/rollup/apis}/stop-job.asciidoc (99%) rename {x-pack/docs/en => docs/reference}/rollup/index.asciidoc (97%) rename {x-pack/docs/en => docs/reference}/rollup/overview.asciidoc (99%) rename {x-pack/docs/en => docs/reference}/rollup/rollup-agg-limitations.asciidoc (94%) rename {x-pack/docs/en/rest-api => docs/reference/rollup}/rollup-api.asciidoc (61%) rename {x-pack/docs/en => docs/reference}/rollup/rollup-getting-started.asciidoc (99%) rename {x-pack/docs/en => 
docs/reference}/rollup/rollup-search-limitations.asciidoc (99%) rename {x-pack/docs/en => docs/reference}/rollup/understanding-groups.asciidoc (99%) diff --git a/docs/build.gradle b/docs/build.gradle index c691253917f49..b833e60d69a65 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -77,6 +77,17 @@ buildRestTests.docs = fileTree(projectDir) { exclude 'build' // Just syntax examples exclude 'README.asciidoc' + // Broken code snippet tests + exclude 'reference/rollup/rollup-getting-started.asciidoc' + exclude 'reference/rollup/apis/rollup-job-config.asciidoc' + exclude 'reference/rollup/apis/rollup-index-caps.asciidoc' + exclude 'reference/rollup/apis/put-job.asciidoc' + exclude 'reference/rollup/apis/stop-job.asciidoc' + exclude 'reference/rollup/apis/start-job.asciidoc' + exclude 'reference/rollup/apis/rollup-search.asciidoc' + exclude 'reference/rollup/apis/delete-job.asciidoc' + exclude 'reference/rollup/apis/get-job.asciidoc' + exclude 'reference/rollup/apis/rollup-caps.asciidoc' } listSnippets.docs = buildRestTests.docs @@ -590,3 +601,259 @@ buildRestTests.setups['library'] = ''' {"index":{"_id": "The Moon is a Harsh Mistress"}} {"name": "The Moon is a Harsh Mistress", "author": "Robert A. Heinlein", "release_date": "1966-04-01", "page_count": 288} ''' +buildRestTests.setups['sensor_rollup_job'] = ''' + - do: + indices.create: + index: sensor-1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + timestamp: + type: date + temperature: + type: long + voltage: + type: float + node: + type: keyword + - do: + xpack.rollup.put_job: + id: "sensor" + body: > + { + "index_pattern": "sensor-*", + "rollup_index": "sensor_rollup", + "cron": "*/30 * * * * ?", + "page_size" :1000, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1h", + "delay": "7d" + }, + "terms": { + "fields": ["node"] + } + }, + "metrics": [ + { + "field": "temperature", + "metrics": ["min", "max", "sum"] + }, + { + "field": "voltage", + "metrics": ["avg"] + } + ] + } +''' +buildRestTests.setups['sensor_started_rollup_job'] = ''' + - do: + indices.create: + index: sensor-1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + timestamp: + type: date + temperature: + type: long + voltage: + type: float + node: + type: keyword + + - do: + bulk: + index: sensor-1 + type: _doc + refresh: true + body: | + {"index":{}} + {"timestamp": 1516729294000, "temperature": 200, "voltage": 5.2, "node": "a"} + {"index":{}} + {"timestamp": 1516642894000, "temperature": 201, "voltage": 5.8, "node": "b"} + {"index":{}} + {"timestamp": 1516556494000, "temperature": 202, "voltage": 5.1, "node": "a"} + {"index":{}} + {"timestamp": 1516470094000, "temperature": 198, "voltage": 5.6, "node": "b"} + {"index":{}} + {"timestamp": 1516383694000, "temperature": 200, "voltage": 4.2, "node": "c"} + {"index":{}} + {"timestamp": 1516297294000, "temperature": 202, "voltage": 4.0, "node": "c"} + + - do: + xpack.rollup.put_job: + id: "sensor" + body: > + { + "index_pattern": "sensor-*", + "rollup_index": "sensor_rollup", + "cron": "* * * * * ?", + "page_size" :1000, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1h", + "delay": "7d" + }, + "terms": { + "fields": ["node"] + } + }, + "metrics": [ + { + "field": "temperature", + "metrics": ["min", "max", "sum"] + }, + { + "field": "voltage", + "metrics": ["avg"] + } + ] + } + - do: + xpack.rollup.start_job: + id: "sensor" +''' + 
+buildRestTests.setups['sensor_index'] = ''' + - do: + indices.create: + index: sensor-1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + timestamp: + type: date + temperature: + type: long + voltage: + type: float + node: + type: keyword + load: + type: double + net_in: + type: long + net_out: + type: long + hostname: + type: keyword + datacenter: + type: keyword +''' + +buildRestTests.setups['sensor_prefab_data'] = ''' + - do: + indices.create: + index: sensor-1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + timestamp: + type: date + temperature: + type: long + voltage: + type: float + node: + type: keyword + - do: + indices.create: + index: sensor_rollup + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + node.terms.value: + type: keyword + temperature.sum.value: + type: double + temperature.max.value: + type: double + temperature.min.value: + type: double + timestamp.date_histogram.time_zone: + type: keyword + timestamp.date_histogram.interval: + type: keyword + timestamp.date_histogram.timestamp: + type: date + timestamp.date_histogram._count: + type: long + voltage.avg.value: + type: double + voltage.avg._count: + type: long + _rollup.id: + type: keyword + _rollup.version: + type: long + _meta: + _rollup: + sensor: + cron: "* * * * * ?" + rollup_index: "sensor_rollup" + index_pattern: "sensor-*" + timeout: "20s" + page_size: 1000 + groups: + date_histogram: + delay: "7d" + field: "timestamp" + interval: "1h" + time_zone: "UTC" + terms: + fields: + - "node" + id: sensor + metrics: + - field: "temperature" + metrics: + - min + - max + - sum + - field: "voltage" + metrics: + - avg + + - do: + bulk: + index: sensor_rollup + type: _doc + refresh: true + body: | + {"index":{}} + {"node.terms.value":"b","temperature.sum.value":201.0,"temperature.max.value":201.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":201.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.800000190734863,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516640400000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + {"index":{}} + {"node.terms.value":"c","temperature.sum.value":200.0,"temperature.max.value":200.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":200.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":4.199999809265137,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516381200000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + {"index":{}} + {"node.terms.value":"a","temperature.sum.value":202.0,"temperature.max.value":202.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":202.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.099999904632568,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516554000000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + {"index":{}} + 
{"node.terms.value":"a","temperature.sum.value":200.0,"temperature.max.value":200.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":200.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.199999809265137,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516726800000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + {"index":{}} + {"node.terms.value":"b","temperature.sum.value":198.0,"temperature.max.value":198.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":198.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.599999904632568,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516467600000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + {"index":{}} + {"node.terms.value":"c","temperature.sum.value":202.0,"temperature.max.value":202.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":202.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":4.0,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516294800000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + +''' diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 19bd49415b3cf..5a991817f6272 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -63,7 +63,7 @@ include::sql/index.asciidoc[] include::monitoring/index.asciidoc[] -include::{xes-repo-dir}/rollup/index.asciidoc[] +include::rollup/index.asciidoc[] include::rest-api/index.asciidoc[] diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc index 9ec57940dd299..e1d607948e1e3 100644 --- a/docs/reference/rest-api/index.asciidoc +++ b/docs/reference/rest-api/index.asciidoc @@ -23,7 +23,7 @@ include::{xes-repo-dir}/rest-api/graph/explore.asciidoc[] include::{es-repo-dir}/licensing/index.asciidoc[] include::{es-repo-dir}/migration/migration.asciidoc[] include::{xes-repo-dir}/rest-api/ml-api.asciidoc[] -include::{xes-repo-dir}/rest-api/rollup-api.asciidoc[] +include::{es-repo-dir}/rollup/rollup-api.asciidoc[] include::{xes-repo-dir}/rest-api/security.asciidoc[] include::{xes-repo-dir}/rest-api/watcher.asciidoc[] include::{xes-repo-dir}/rest-api/defs.asciidoc[] diff --git a/x-pack/docs/en/rollup/api-quickref.asciidoc b/docs/reference/rollup/api-quickref.asciidoc similarity index 97% rename from x-pack/docs/en/rollup/api-quickref.asciidoc rename to docs/reference/rollup/api-quickref.asciidoc index 5e99f1c69841c..1d372a03ddcfb 100644 --- a/x-pack/docs/en/rollup/api-quickref.asciidoc +++ b/docs/reference/rollup/api-quickref.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[rollup-api-quickref]] == API Quick Reference diff --git a/x-pack/docs/en/rest-api/rollup/delete-job.asciidoc b/docs/reference/rollup/apis/delete-job.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/rollup/delete-job.asciidoc rename to docs/reference/rollup/apis/delete-job.asciidoc index b795e0b28c760..37774560848c5 100644 --- 
a/x-pack/docs/en/rest-api/rollup/delete-job.asciidoc +++ b/docs/reference/rollup/apis/delete-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-delete-job]] === Delete Job API ++++ diff --git a/x-pack/docs/en/rest-api/rollup/get-job.asciidoc b/docs/reference/rollup/apis/get-job.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/rollup/get-job.asciidoc rename to docs/reference/rollup/apis/get-job.asciidoc index 96053dbfea64f..794d72480121b 100644 --- a/x-pack/docs/en/rest-api/rollup/get-job.asciidoc +++ b/docs/reference/rollup/apis/get-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-get-job]] === Get Rollup Jobs API ++++ diff --git a/x-pack/docs/en/rest-api/rollup/put-job.asciidoc b/docs/reference/rollup/apis/put-job.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/rollup/put-job.asciidoc rename to docs/reference/rollup/apis/put-job.asciidoc index 27889d985b8c8..79e30ae8dc99b 100644 --- a/x-pack/docs/en/rest-api/rollup/put-job.asciidoc +++ b/docs/reference/rollup/apis/put-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-put-job]] === Create Job API ++++ diff --git a/x-pack/docs/en/rest-api/rollup/rollup-caps.asciidoc b/docs/reference/rollup/apis/rollup-caps.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/rollup/rollup-caps.asciidoc rename to docs/reference/rollup/apis/rollup-caps.asciidoc index 1f233f195a09e..907efb94c1776 100644 --- a/x-pack/docs/en/rest-api/rollup/rollup-caps.asciidoc +++ b/docs/reference/rollup/apis/rollup-caps.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-get-rollup-caps]] === Get Rollup Job Capabilities ++++ diff --git a/x-pack/docs/en/rest-api/rollup/rollup-index-caps.asciidoc b/docs/reference/rollup/apis/rollup-index-caps.asciidoc similarity index 100% rename from x-pack/docs/en/rest-api/rollup/rollup-index-caps.asciidoc rename to docs/reference/rollup/apis/rollup-index-caps.asciidoc diff --git a/x-pack/docs/en/rest-api/rollup/rollup-job-config.asciidoc b/docs/reference/rollup/apis/rollup-job-config.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/rollup/rollup-job-config.asciidoc rename to docs/reference/rollup/apis/rollup-job-config.asciidoc index f937f28601a2e..3a917fb59f214 100644 --- a/x-pack/docs/en/rest-api/rollup/rollup-job-config.asciidoc +++ b/docs/reference/rollup/apis/rollup-job-config.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-job-config]] === Rollup Job Configuration diff --git a/x-pack/docs/en/rest-api/rollup/rollup-search.asciidoc b/docs/reference/rollup/apis/rollup-search.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/rollup/rollup-search.asciidoc rename to docs/reference/rollup/apis/rollup-search.asciidoc index 115ef8fb04381..8e7fc69a00a6b 100644 --- a/x-pack/docs/en/rest-api/rollup/rollup-search.asciidoc +++ b/docs/reference/rollup/apis/rollup-search.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-search]] === Rollup Search ++++ diff --git a/x-pack/docs/en/rest-api/rollup/start-job.asciidoc b/docs/reference/rollup/apis/start-job.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/rollup/start-job.asciidoc rename to docs/reference/rollup/apis/start-job.asciidoc index 9a0a0a7e4f01c..cf44883895c4c 100644 --- a/x-pack/docs/en/rest-api/rollup/start-job.asciidoc +++ b/docs/reference/rollup/apis/start-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-start-job]] === Start Job API ++++ diff --git 
a/x-pack/docs/en/rest-api/rollup/stop-job.asciidoc b/docs/reference/rollup/apis/stop-job.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/rollup/stop-job.asciidoc rename to docs/reference/rollup/apis/stop-job.asciidoc index 6050740270503..5912b2d688b70 100644 --- a/x-pack/docs/en/rest-api/rollup/stop-job.asciidoc +++ b/docs/reference/rollup/apis/stop-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-stop-job]] === Stop Job API ++++ diff --git a/x-pack/docs/en/rollup/index.asciidoc b/docs/reference/rollup/index.asciidoc similarity index 97% rename from x-pack/docs/en/rollup/index.asciidoc rename to docs/reference/rollup/index.asciidoc index 9ac89341bfe99..64dc233f82f6e 100644 --- a/x-pack/docs/en/rollup/index.asciidoc +++ b/docs/reference/rollup/index.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[xpack-rollup]] = Rolling up historical data diff --git a/x-pack/docs/en/rollup/overview.asciidoc b/docs/reference/rollup/overview.asciidoc similarity index 99% rename from x-pack/docs/en/rollup/overview.asciidoc rename to docs/reference/rollup/overview.asciidoc index a9a983fbecc1d..b2570f647e72b 100644 --- a/x-pack/docs/en/rollup/overview.asciidoc +++ b/docs/reference/rollup/overview.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[rollup-overview]] == Overview diff --git a/x-pack/docs/en/rollup/rollup-agg-limitations.asciidoc b/docs/reference/rollup/rollup-agg-limitations.asciidoc similarity index 94% rename from x-pack/docs/en/rollup/rollup-agg-limitations.asciidoc rename to docs/reference/rollup/rollup-agg-limitations.asciidoc index cd20622d93c8d..9f8b6f66adeeb 100644 --- a/x-pack/docs/en/rollup/rollup-agg-limitations.asciidoc +++ b/docs/reference/rollup/rollup-agg-limitations.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[rollup-agg-limitations]] == Rollup Aggregation Limitations diff --git a/x-pack/docs/en/rest-api/rollup-api.asciidoc b/docs/reference/rollup/rollup-api.asciidoc similarity index 61% rename from x-pack/docs/en/rest-api/rollup-api.asciidoc rename to docs/reference/rollup/rollup-api.asciidoc index 9a8ec00d77a0c..099686fb4329d 100644 --- a/x-pack/docs/en/rest-api/rollup-api.asciidoc +++ b/docs/reference/rollup/rollup-api.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-apis]] == Rollup APIs @@ -26,12 +27,12 @@ -include::rollup/delete-job.asciidoc[] -include::rollup/get-job.asciidoc[] -include::rollup/put-job.asciidoc[] -include::rollup/start-job.asciidoc[] -include::rollup/stop-job.asciidoc[] -include::rollup/rollup-caps.asciidoc[] -include::rollup/rollup-index-caps.asciidoc[] -include::rollup/rollup-search.asciidoc[] -include::rollup/rollup-job-config.asciidoc[] \ No newline at end of file +include::apis/delete-job.asciidoc[] +include::apis/get-job.asciidoc[] +include::apis/put-job.asciidoc[] +include::apis/start-job.asciidoc[] +include::apis/stop-job.asciidoc[] +include::apis/rollup-caps.asciidoc[] +include::apis/rollup-index-caps.asciidoc[] +include::apis/rollup-search.asciidoc[] +include::apis/rollup-job-config.asciidoc[] diff --git a/x-pack/docs/en/rollup/rollup-getting-started.asciidoc b/docs/reference/rollup/rollup-getting-started.asciidoc similarity index 99% rename from x-pack/docs/en/rollup/rollup-getting-started.asciidoc rename to docs/reference/rollup/rollup-getting-started.asciidoc index b6c913d7d34ac..8f99bc2c010ce 100644 --- a/x-pack/docs/en/rollup/rollup-getting-started.asciidoc +++ b/docs/reference/rollup/rollup-getting-started.asciidoc @@ -1,3 +1,5 @@ 
+[role="xpack"] +[testenv="basic"] [[rollup-getting-started]] == Getting Started diff --git a/x-pack/docs/en/rollup/rollup-search-limitations.asciidoc b/docs/reference/rollup/rollup-search-limitations.asciidoc similarity index 99% rename from x-pack/docs/en/rollup/rollup-search-limitations.asciidoc rename to docs/reference/rollup/rollup-search-limitations.asciidoc index 99f19a179ede7..43feeab9a2eef 100644 --- a/x-pack/docs/en/rollup/rollup-search-limitations.asciidoc +++ b/docs/reference/rollup/rollup-search-limitations.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[rollup-search-limitations]] == Rollup Search Limitations diff --git a/x-pack/docs/en/rollup/understanding-groups.asciidoc b/docs/reference/rollup/understanding-groups.asciidoc similarity index 99% rename from x-pack/docs/en/rollup/understanding-groups.asciidoc rename to docs/reference/rollup/understanding-groups.asciidoc index 803555b2d73f7..6321ab9b00f53 100644 --- a/x-pack/docs/en/rollup/understanding-groups.asciidoc +++ b/docs/reference/rollup/understanding-groups.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[rollup-understanding-groups]] == Understanding Groups From 7ea3d4a6d7e28073c1e2bd67391141156c161671 Mon Sep 17 00:00:00 2001 From: Tal Levy Date: Fri, 31 Aug 2018 12:03:49 -0700 Subject: [PATCH 43/52] TEST: mute more SmokeTestWatcherWithSecurityIT tests tracked at https://github.com/elastic/elasticsearch/issues/33320 and https://github.com/elastic/elasticsearch/issues/30777 --- .../elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java index 538d54416bf69..71dd17f0684b0 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java @@ -135,6 +135,7 @@ protected Settings restAdminSettings() { } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33320") public void testSearchInputHasPermissions() throws Exception { try (XContentBuilder builder = jsonBuilder()) { builder.startObject(); @@ -242,6 +243,7 @@ public void testSearchTransformInsufficientPermissions() throws Exception { assertThat(response.getStatusLine().getStatusCode(), is(404)); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30777") public void testIndexActionHasPermissions() throws Exception { try (XContentBuilder builder = jsonBuilder()) { builder.startObject(); From 54a53d2ce62f232d5b01c716961eccf983dedf21 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Fri, 31 Aug 2018 11:56:26 -0700 Subject: [PATCH 44/52] [DOCS] Moves ml folder from x-pack/docs to docs (#33248) --- docs/build.gradle | 6 ++ .../reference}/ml/aggregations.asciidoc | 4 +- .../reference}/ml/categories.asciidoc | 3 + .../reference}/ml/configuring.asciidoc | 12 +-- .../reference}/ml/customurl.asciidoc | 2 +- .../ml/detector-custom-rules.asciidoc | 7 +- .../reference}/ml/functions.asciidoc | 0 .../reference}/ml/functions/count.asciidoc | 7 ++ .../reference}/ml/functions/geo.asciidoc | 3 +- .../reference}/ml/functions/info.asciidoc | 0 .../reference}/ml/functions/metric.asciidoc | 0 
.../reference}/ml/functions/rare.asciidoc | 0 .../reference}/ml/functions/sum.asciidoc | 0 .../reference}/ml/functions/time.asciidoc | 0 .../ml/images/ml-category-advanced.jpg | Bin .../ml/images/ml-category-anomalies.jpg | Bin .../reference}/ml/images/ml-categoryterms.jpg | Bin .../reference}/ml/images/ml-create-job.jpg | Bin .../reference}/ml/images/ml-create-jobs.jpg | Bin .../ml/images/ml-customurl-detail.jpg | Bin .../ml/images/ml-customurl-discover.jpg | Bin .../ml/images/ml-customurl-edit.jpg | Bin .../reference}/ml/images/ml-customurl.jpg | Bin .../reference}/ml/images/ml-data-dates.jpg | Bin .../reference}/ml/images/ml-data-keywords.jpg | Bin .../reference}/ml/images/ml-data-metrics.jpg | Bin .../ml/images/ml-data-topmetrics.jpg | Bin .../ml/images/ml-data-visualizer.jpg | Bin .../reference}/ml/images/ml-edit-job.jpg | Bin .../ml/images/ml-population-anomaly.jpg | Bin .../ml/images/ml-population-job.jpg | Bin .../ml/images/ml-population-results.jpg | Bin .../reference}/ml/images/ml-scriptfields.jpg | Bin .../reference}/ml/images/ml-start-feed.jpg | Bin .../reference}/ml/images/ml-stop-feed.jpg | Bin .../en => docs/reference}/ml/images/ml.jpg | Bin .../reference}/ml/populations.asciidoc | 5 +- .../reference}/ml/stopping-ml.asciidoc | 6 +- .../reference}/ml/transforms.asciidoc | 33 +++--- x-pack/docs/en/ml/api-quickref.asciidoc | 102 ------------------ 40 files changed, 50 insertions(+), 140 deletions(-) rename {x-pack/docs/en => docs/reference}/ml/aggregations.asciidoc (99%) rename {x-pack/docs/en => docs/reference}/ml/categories.asciidoc (99%) rename {x-pack/docs/en => docs/reference}/ml/configuring.asciidoc (88%) rename {x-pack/docs/en => docs/reference}/ml/customurl.asciidoc (99%) rename {x-pack/docs/en => docs/reference}/ml/detector-custom-rules.asciidoc (97%) rename {x-pack/docs/en => docs/reference}/ml/functions.asciidoc (100%) rename {x-pack/docs/en => docs/reference}/ml/functions/count.asciidoc (97%) rename {x-pack/docs/en => docs/reference}/ml/functions/geo.asciidoc (98%) rename {x-pack/docs/en => docs/reference}/ml/functions/info.asciidoc (100%) rename {x-pack/docs/en => docs/reference}/ml/functions/metric.asciidoc (100%) rename {x-pack/docs/en => docs/reference}/ml/functions/rare.asciidoc (100%) rename {x-pack/docs/en => docs/reference}/ml/functions/sum.asciidoc (100%) rename {x-pack/docs/en => docs/reference}/ml/functions/time.asciidoc (100%) rename {x-pack/docs/en => docs/reference}/ml/images/ml-category-advanced.jpg (100%) rename {x-pack/docs/en => docs/reference}/ml/images/ml-category-anomalies.jpg (100%) rename {x-pack/docs/en => docs/reference}/ml/images/ml-categoryterms.jpg (100%) rename {x-pack/docs/en => docs/reference}/ml/images/ml-create-job.jpg (100%) rename {x-pack/docs/en => docs/reference}/ml/images/ml-create-jobs.jpg (100%) rename {x-pack/docs/en => docs/reference}/ml/images/ml-customurl-detail.jpg (100%) rename {x-pack/docs/en => docs/reference}/ml/images/ml-customurl-discover.jpg (100%) rename {x-pack/docs/en => docs/reference}/ml/images/ml-customurl-edit.jpg (100%) rename {x-pack/docs/en => docs/reference}/ml/images/ml-customurl.jpg (100%) rename {x-pack/docs/en => docs/reference}/ml/images/ml-data-dates.jpg (100%) rename {x-pack/docs/en => docs/reference}/ml/images/ml-data-keywords.jpg (100%) rename {x-pack/docs/en => docs/reference}/ml/images/ml-data-metrics.jpg (100%) rename {x-pack/docs/en => docs/reference}/ml/images/ml-data-topmetrics.jpg (100%) rename {x-pack/docs/en => docs/reference}/ml/images/ml-data-visualizer.jpg (100%) rename 
{x-pack/docs/en => docs/reference}/ml/images/ml-edit-job.jpg (100%) rename {x-pack/docs/en => docs/reference}/ml/images/ml-population-anomaly.jpg (100%) rename {x-pack/docs/en => docs/reference}/ml/images/ml-population-job.jpg (100%) rename {x-pack/docs/en => docs/reference}/ml/images/ml-population-results.jpg (100%) rename {x-pack/docs/en => docs/reference}/ml/images/ml-scriptfields.jpg (100%) rename {x-pack/docs/en => docs/reference}/ml/images/ml-start-feed.jpg (100%) rename {x-pack/docs/en => docs/reference}/ml/images/ml-stop-feed.jpg (100%) rename {x-pack/docs/en => docs/reference}/ml/images/ml.jpg (100%) rename {x-pack/docs/en => docs/reference}/ml/populations.asciidoc (94%) rename {x-pack/docs/en => docs/reference}/ml/stopping-ml.asciidoc (94%) rename {x-pack/docs/en => docs/reference}/ml/transforms.asciidoc (97%) delete mode 100644 x-pack/docs/en/ml/api-quickref.asciidoc diff --git a/docs/build.gradle b/docs/build.gradle index b833e60d69a65..78d3aac48ce95 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -19,6 +19,12 @@ apply plugin: 'elasticsearch.docs-test' +/* List of files that have snippets that require a gold or platinum licence +and therefore cannot be tested yet... */ +buildRestTests.expectedUnconvertedCandidates = [ + 'reference/ml/transforms.asciidoc', +] + integTestCluster { /* Enable regexes in painless so our tests don't complain about example * snippets that use them. */ diff --git a/x-pack/docs/en/ml/aggregations.asciidoc b/docs/reference/ml/aggregations.asciidoc similarity index 99% rename from x-pack/docs/en/ml/aggregations.asciidoc rename to docs/reference/ml/aggregations.asciidoc index 07f465015696d..4b873ea790b1e 100644 --- a/x-pack/docs/en/ml/aggregations.asciidoc +++ b/docs/reference/ml/aggregations.asciidoc @@ -41,7 +41,7 @@ PUT _xpack/ml/anomaly_detectors/farequote } ---------------------------------- // CONSOLE -// TEST[setup:farequote_data] +// TEST[skip:setup:farequote_data] In this example, the `airline`, `responsetime`, and `time` fields are aggregations. @@ -90,7 +90,7 @@ PUT _xpack/ml/datafeeds/datafeed-farequote } ---------------------------------- // CONSOLE -// TEST[setup:farequote_job] +// TEST[skip:setup:farequote_job] In this example, the aggregations have names that match the fields that they operate on. That is to say, the `max` aggregation is named `time` and its diff --git a/x-pack/docs/en/ml/categories.asciidoc b/docs/reference/ml/categories.asciidoc similarity index 99% rename from x-pack/docs/en/ml/categories.asciidoc rename to docs/reference/ml/categories.asciidoc index 21f71b871cbb9..03ebc8af76ee6 100644 --- a/x-pack/docs/en/ml/categories.asciidoc +++ b/docs/reference/ml/categories.asciidoc @@ -44,6 +44,7 @@ PUT _xpack/ml/anomaly_detectors/it_ops_new_logs } ---------------------------------- //CONSOLE +// TEST[skip:needs-licence] <1> The `categorization_field_name` property indicates which field will be categorized. <2> The resulting categories are used in a detector by setting `by_field_name`, @@ -127,6 +128,7 @@ PUT _xpack/ml/anomaly_detectors/it_ops_new_logs2 } ---------------------------------- //CONSOLE +// TEST[skip:needs-licence] <1> The {ref}/analysis-pattern-replace-charfilter.html[`pattern_replace` character filter] here achieves exactly the same as the `categorization_filters` in the first @@ -193,6 +195,7 @@ PUT _xpack/ml/anomaly_detectors/it_ops_new_logs3 } ---------------------------------- //CONSOLE +// TEST[skip:needs-licence] <1> Tokens basically consist of hyphens, digits, letters, underscores and dots. 
<2> By default, categorization ignores tokens that begin with a digit. <3> By default, categorization also ignores tokens that are hexadecimal numbers. diff --git a/x-pack/docs/en/ml/configuring.asciidoc b/docs/reference/ml/configuring.asciidoc similarity index 88% rename from x-pack/docs/en/ml/configuring.asciidoc rename to docs/reference/ml/configuring.asciidoc index e35f046a82bd9..9b6149d662aa8 100644 --- a/x-pack/docs/en/ml/configuring.asciidoc +++ b/docs/reference/ml/configuring.asciidoc @@ -36,20 +36,20 @@ The scenarios in this section describe some best practices for generating useful * <> * <> -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/customurl.asciidoc +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/customurl.asciidoc include::customurl.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/aggregations.asciidoc +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/aggregations.asciidoc include::aggregations.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/categories.asciidoc +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/categories.asciidoc include::categories.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/populations.asciidoc +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/populations.asciidoc include::populations.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/transforms.asciidoc +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/transforms.asciidoc include::transforms.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/detector-custom-rules.asciidoc +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/detector-custom-rules.asciidoc include::detector-custom-rules.asciidoc[] \ No newline at end of file diff --git a/x-pack/docs/en/ml/customurl.asciidoc b/docs/reference/ml/customurl.asciidoc similarity index 99% rename from x-pack/docs/en/ml/customurl.asciidoc rename to docs/reference/ml/customurl.asciidoc index 7c197084c0e5f..95f4f5f938f0a 100644 --- a/x-pack/docs/en/ml/customurl.asciidoc +++ b/docs/reference/ml/customurl.asciidoc @@ -106,7 +106,7 @@ POST _xpack/ml/anomaly_detectors/sample_job/_update } ---------------------------------- //CONSOLE -//TEST[setup:sample_job] +//TEST[skip:setup:sample_job] When you click this custom URL in the anomalies table in {kib}, it opens up the *Discover* page and displays source data for the period one hour before and diff --git a/x-pack/docs/en/ml/detector-custom-rules.asciidoc b/docs/reference/ml/detector-custom-rules.asciidoc similarity index 97% rename from x-pack/docs/en/ml/detector-custom-rules.asciidoc rename to docs/reference/ml/detector-custom-rules.asciidoc index 8513c7e4d2566..02881f4cc4313 100644 --- a/x-pack/docs/en/ml/detector-custom-rules.asciidoc +++ b/docs/reference/ml/detector-custom-rules.asciidoc @@ -39,6 +39,7 @@ PUT _xpack/ml/filters/safe_domains } ---------------------------------- // CONSOLE +// TEST[skip:needs-licence] Now, we can create our job specifying a scope that uses the `safe_domains` filter for the `highest_registered_domain` 
field: @@ -70,6 +71,7 @@ PUT _xpack/ml/anomaly_detectors/dns_exfiltration_with_rule } ---------------------------------- // CONSOLE +// TEST[skip:needs-licence] As time advances and we see more data and more results, we might encounter new domains that we want to add in the filter. We can do that by using the @@ -83,7 +85,7 @@ POST _xpack/ml/filters/safe_domains/_update } ---------------------------------- // CONSOLE -// TEST[setup:ml_filter_safe_domains] +// TEST[skip:setup:ml_filter_safe_domains] Note that we can use any of the `partition_field_name`, `over_field_name`, or `by_field_name` fields in the `scope`. @@ -123,6 +125,7 @@ PUT _xpack/ml/anomaly_detectors/scoping_multiple_fields } ---------------------------------- // CONSOLE +// TEST[skip:needs-licence] Such a detector will skip results when the values of all 3 scoped fields are included in the referenced filters. @@ -166,6 +169,7 @@ PUT _xpack/ml/anomaly_detectors/cpu_with_rule } ---------------------------------- // CONSOLE +// TEST[skip:needs-licence] When there are multiple conditions they are combined with a logical `and`. This is useful when we want the rule to apply to a range. We simply create @@ -205,6 +209,7 @@ PUT _xpack/ml/anomaly_detectors/rule_with_range } ---------------------------------- // CONSOLE +// TEST[skip:needs-licence] ==== Custom rules in the life-cycle of a job diff --git a/x-pack/docs/en/ml/functions.asciidoc b/docs/reference/ml/functions.asciidoc similarity index 100% rename from x-pack/docs/en/ml/functions.asciidoc rename to docs/reference/ml/functions.asciidoc diff --git a/x-pack/docs/en/ml/functions/count.asciidoc b/docs/reference/ml/functions/count.asciidoc similarity index 97% rename from x-pack/docs/en/ml/functions/count.asciidoc rename to docs/reference/ml/functions/count.asciidoc index a2dc5645b61ae..abbbd118ffebf 100644 --- a/x-pack/docs/en/ml/functions/count.asciidoc +++ b/docs/reference/ml/functions/count.asciidoc @@ -59,6 +59,7 @@ PUT _xpack/ml/anomaly_detectors/example1 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] This example is probably the simplest possible analysis. It identifies time buckets during which the overall count of events is higher or lower than @@ -86,6 +87,7 @@ PUT _xpack/ml/anomaly_detectors/example2 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] If you use this `high_count` function in a detector in your job, it models the event rate for each error code. It detects users that generate an @@ -110,6 +112,7 @@ PUT _xpack/ml/anomaly_detectors/example3 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] In this example, the function detects when the count of events for a status code is lower than usual. @@ -136,6 +139,7 @@ PUT _xpack/ml/anomaly_detectors/example4 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] If you are analyzing an aggregated `events_per_min` field, do not use a sum function (for example, `sum(events_per_min)`). Instead, use the count function @@ -200,6 +204,7 @@ PUT _xpack/ml/anomaly_detectors/example5 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] If you use this `high_non_zero_count` function in a detector in your job, it models the count of events for the `signaturename` field. 
It ignores any buckets @@ -253,6 +258,7 @@ PUT _xpack/ml/anomaly_detectors/example6 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] This `distinct_count` function detects when a system has an unusual number of logged in users. When you use this function in a detector in your job, it @@ -278,6 +284,7 @@ PUT _xpack/ml/anomaly_detectors/example7 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] This example detects instances of port scanning. When you use this function in a detector in your job, it models the distinct count of ports. It also detects the diff --git a/x-pack/docs/en/ml/functions/geo.asciidoc b/docs/reference/ml/functions/geo.asciidoc similarity index 98% rename from x-pack/docs/en/ml/functions/geo.asciidoc rename to docs/reference/ml/functions/geo.asciidoc index 5bcf6c3394558..461ab825ff5b2 100644 --- a/x-pack/docs/en/ml/functions/geo.asciidoc +++ b/docs/reference/ml/functions/geo.asciidoc @@ -47,6 +47,7 @@ PUT _xpack/ml/anomaly_detectors/example1 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] If you use this `lat_long` function in a detector in your job, it detects anomalies where the geographic location of a credit card transaction is @@ -98,6 +99,6 @@ PUT _xpack/ml/datafeeds/datafeed-test2 } -------------------------------------------------- // CONSOLE -// TEST[setup:farequote_job] +// TEST[skip:setup:farequote_job] For more information, see <>. diff --git a/x-pack/docs/en/ml/functions/info.asciidoc b/docs/reference/ml/functions/info.asciidoc similarity index 100% rename from x-pack/docs/en/ml/functions/info.asciidoc rename to docs/reference/ml/functions/info.asciidoc diff --git a/x-pack/docs/en/ml/functions/metric.asciidoc b/docs/reference/ml/functions/metric.asciidoc similarity index 100% rename from x-pack/docs/en/ml/functions/metric.asciidoc rename to docs/reference/ml/functions/metric.asciidoc diff --git a/x-pack/docs/en/ml/functions/rare.asciidoc b/docs/reference/ml/functions/rare.asciidoc similarity index 100% rename from x-pack/docs/en/ml/functions/rare.asciidoc rename to docs/reference/ml/functions/rare.asciidoc diff --git a/x-pack/docs/en/ml/functions/sum.asciidoc b/docs/reference/ml/functions/sum.asciidoc similarity index 100% rename from x-pack/docs/en/ml/functions/sum.asciidoc rename to docs/reference/ml/functions/sum.asciidoc diff --git a/x-pack/docs/en/ml/functions/time.asciidoc b/docs/reference/ml/functions/time.asciidoc similarity index 100% rename from x-pack/docs/en/ml/functions/time.asciidoc rename to docs/reference/ml/functions/time.asciidoc diff --git a/x-pack/docs/en/ml/images/ml-category-advanced.jpg b/docs/reference/ml/images/ml-category-advanced.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-category-advanced.jpg rename to docs/reference/ml/images/ml-category-advanced.jpg diff --git a/x-pack/docs/en/ml/images/ml-category-anomalies.jpg b/docs/reference/ml/images/ml-category-anomalies.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-category-anomalies.jpg rename to docs/reference/ml/images/ml-category-anomalies.jpg diff --git a/x-pack/docs/en/ml/images/ml-categoryterms.jpg b/docs/reference/ml/images/ml-categoryterms.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-categoryterms.jpg rename to docs/reference/ml/images/ml-categoryterms.jpg diff --git a/x-pack/docs/en/ml/images/ml-create-job.jpg b/docs/reference/ml/images/ml-create-job.jpg similarity index 100% rename 
from x-pack/docs/en/ml/images/ml-create-job.jpg rename to docs/reference/ml/images/ml-create-job.jpg diff --git a/x-pack/docs/en/ml/images/ml-create-jobs.jpg b/docs/reference/ml/images/ml-create-jobs.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-create-jobs.jpg rename to docs/reference/ml/images/ml-create-jobs.jpg diff --git a/x-pack/docs/en/ml/images/ml-customurl-detail.jpg b/docs/reference/ml/images/ml-customurl-detail.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-customurl-detail.jpg rename to docs/reference/ml/images/ml-customurl-detail.jpg diff --git a/x-pack/docs/en/ml/images/ml-customurl-discover.jpg b/docs/reference/ml/images/ml-customurl-discover.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-customurl-discover.jpg rename to docs/reference/ml/images/ml-customurl-discover.jpg diff --git a/x-pack/docs/en/ml/images/ml-customurl-edit.jpg b/docs/reference/ml/images/ml-customurl-edit.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-customurl-edit.jpg rename to docs/reference/ml/images/ml-customurl-edit.jpg diff --git a/x-pack/docs/en/ml/images/ml-customurl.jpg b/docs/reference/ml/images/ml-customurl.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-customurl.jpg rename to docs/reference/ml/images/ml-customurl.jpg diff --git a/x-pack/docs/en/ml/images/ml-data-dates.jpg b/docs/reference/ml/images/ml-data-dates.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-data-dates.jpg rename to docs/reference/ml/images/ml-data-dates.jpg diff --git a/x-pack/docs/en/ml/images/ml-data-keywords.jpg b/docs/reference/ml/images/ml-data-keywords.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-data-keywords.jpg rename to docs/reference/ml/images/ml-data-keywords.jpg diff --git a/x-pack/docs/en/ml/images/ml-data-metrics.jpg b/docs/reference/ml/images/ml-data-metrics.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-data-metrics.jpg rename to docs/reference/ml/images/ml-data-metrics.jpg diff --git a/x-pack/docs/en/ml/images/ml-data-topmetrics.jpg b/docs/reference/ml/images/ml-data-topmetrics.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-data-topmetrics.jpg rename to docs/reference/ml/images/ml-data-topmetrics.jpg diff --git a/x-pack/docs/en/ml/images/ml-data-visualizer.jpg b/docs/reference/ml/images/ml-data-visualizer.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-data-visualizer.jpg rename to docs/reference/ml/images/ml-data-visualizer.jpg diff --git a/x-pack/docs/en/ml/images/ml-edit-job.jpg b/docs/reference/ml/images/ml-edit-job.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-edit-job.jpg rename to docs/reference/ml/images/ml-edit-job.jpg diff --git a/x-pack/docs/en/ml/images/ml-population-anomaly.jpg b/docs/reference/ml/images/ml-population-anomaly.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-population-anomaly.jpg rename to docs/reference/ml/images/ml-population-anomaly.jpg diff --git a/x-pack/docs/en/ml/images/ml-population-job.jpg b/docs/reference/ml/images/ml-population-job.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-population-job.jpg rename to docs/reference/ml/images/ml-population-job.jpg diff --git a/x-pack/docs/en/ml/images/ml-population-results.jpg b/docs/reference/ml/images/ml-population-results.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-population-results.jpg rename to docs/reference/ml/images/ml-population-results.jpg diff --git 
a/x-pack/docs/en/ml/images/ml-scriptfields.jpg b/docs/reference/ml/images/ml-scriptfields.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-scriptfields.jpg rename to docs/reference/ml/images/ml-scriptfields.jpg diff --git a/x-pack/docs/en/ml/images/ml-start-feed.jpg b/docs/reference/ml/images/ml-start-feed.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-start-feed.jpg rename to docs/reference/ml/images/ml-start-feed.jpg diff --git a/x-pack/docs/en/ml/images/ml-stop-feed.jpg b/docs/reference/ml/images/ml-stop-feed.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-stop-feed.jpg rename to docs/reference/ml/images/ml-stop-feed.jpg diff --git a/x-pack/docs/en/ml/images/ml.jpg b/docs/reference/ml/images/ml.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml.jpg rename to docs/reference/ml/images/ml.jpg diff --git a/x-pack/docs/en/ml/populations.asciidoc b/docs/reference/ml/populations.asciidoc similarity index 94% rename from x-pack/docs/en/ml/populations.asciidoc rename to docs/reference/ml/populations.asciidoc index bf0dd2ad7d7bb..ed58c117f17d7 100644 --- a/x-pack/docs/en/ml/populations.asciidoc +++ b/docs/reference/ml/populations.asciidoc @@ -51,14 +51,11 @@ PUT _xpack/ml/anomaly_detectors/population } ---------------------------------- //CONSOLE +// TEST[skip:needs-licence] <1> This `over_field_name` property indicates that the metrics for each user ( as identified by their `username` value) are analyzed relative to other users in each bucket. -//TO-DO: Per sophiec20 "Perhaps add the datafeed config and add a query filter to -//include only workstations as servers and printers would behave differently -//from the population - If your data is stored in {es}, you can use the population job wizard in {kib} to create a job with these same properties. For example, the population job wizard provides the following job settings: diff --git a/x-pack/docs/en/ml/stopping-ml.asciidoc b/docs/reference/ml/stopping-ml.asciidoc similarity index 94% rename from x-pack/docs/en/ml/stopping-ml.asciidoc rename to docs/reference/ml/stopping-ml.asciidoc index c0be2d947cdc7..17505a02d1521 100644 --- a/x-pack/docs/en/ml/stopping-ml.asciidoc +++ b/docs/reference/ml/stopping-ml.asciidoc @@ -28,7 +28,7 @@ request stops the `feed1` {dfeed}: POST _xpack/ml/datafeeds/datafeed-total-requests/_stop -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_startdf] +// TEST[skip:setup:server_metrics_startdf] NOTE: You must have `manage_ml`, or `manage` cluster privileges to stop {dfeeds}. For more information, see <>. @@ -49,6 +49,7 @@ If you are upgrading your cluster, you can use the following request to stop all POST _xpack/ml/datafeeds/_all/_stop ---------------------------------- // CONSOLE +// TEST[skip:needs-licence] [float] [[closing-ml-jobs]] @@ -67,7 +68,7 @@ example, the following request closes the `job1` job: POST _xpack/ml/anomaly_detectors/total-requests/_close -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_openjob] +// TEST[skip:setup:server_metrics_openjob] NOTE: You must have `manage_ml`, or `manage` cluster privileges to stop {dfeeds}. For more information, see <>. 
@@ -86,3 +87,4 @@ all open jobs on the cluster: POST _xpack/ml/anomaly_detectors/_all/_close ---------------------------------- // CONSOLE +// TEST[skip:needs-licence] diff --git a/x-pack/docs/en/ml/transforms.asciidoc b/docs/reference/ml/transforms.asciidoc similarity index 97% rename from x-pack/docs/en/ml/transforms.asciidoc rename to docs/reference/ml/transforms.asciidoc index c4b4d56029748..a2276895fc9e8 100644 --- a/x-pack/docs/en/ml/transforms.asciidoc +++ b/docs/reference/ml/transforms.asciidoc @@ -95,7 +95,7 @@ PUT /my_index/my_type/1 } ---------------------------------- // CONSOLE -// TESTSETUP +// TEST[skip:SETUP] <1> In this example, string fields are mapped as `keyword` fields to support aggregation. If you want both a full text (`text`) and a keyword (`keyword`) version of the same field, use multi-fields. For more information, see @@ -144,7 +144,7 @@ PUT _xpack/ml/datafeeds/datafeed-test1 } ---------------------------------- // CONSOLE -// TEST[skip:broken] +// TEST[skip:needs-licence] <1> A script field named `total_error_count` is referenced in the detector within the job. <2> The script field is defined in the {dfeed}. @@ -163,7 +163,7 @@ You can preview the contents of the {dfeed} by using the following API: GET _xpack/ml/datafeeds/datafeed-test1/_preview ---------------------------------- // CONSOLE -// TEST[continued] +// TEST[skip:continued] In this example, the API returns the following results, which contain a sum of the `error_count` and `aborted_count` values: @@ -177,8 +177,6 @@ the `error_count` and `aborted_count` values: } ] ---------------------------------- -// TESTRESPONSE - NOTE: This example demonstrates how to use script fields, but it contains insufficient data to generate meaningful results. For a full demonstration of @@ -254,7 +252,7 @@ PUT _xpack/ml/datafeeds/datafeed-test2 GET _xpack/ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- // CONSOLE -// TEST[skip:broken] +// TEST[skip:needs-licence] <1> The script field has a rather generic name in this case, since it will be used for various tests in the subsequent examples. <2> The script field uses the plus (+) operator to concatenate strings. @@ -271,7 +269,6 @@ and "SMITH " have been concatenated and an underscore was added: } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform3]] .Example 3: Trimming strings @@ -292,7 +289,7 @@ POST _xpack/ml/datafeeds/datafeed-test2/_update GET _xpack/ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- // CONSOLE -// TEST[continued] +// TEST[skip:continued] <1> This script field uses the `trim()` function to trim extra white space from a string. @@ -308,7 +305,6 @@ has been trimmed to "SMITH": } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform4]] .Example 4: Converting strings to lowercase @@ -329,7 +325,7 @@ POST _xpack/ml/datafeeds/datafeed-test2/_update GET _xpack/ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- // CONSOLE -// TEST[continued] +// TEST[skip:continued] <1> This script field uses the `toLowerCase` function to convert a string to all lowercase letters. Likewise, you can use the `toUpperCase{}` function to convert a string to uppercase letters. 
@@ -346,7 +342,6 @@ has been converted to "joe": } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform5]] .Example 5: Converting strings to mixed case formats @@ -367,7 +362,7 @@ POST _xpack/ml/datafeeds/datafeed-test2/_update GET _xpack/ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- // CONSOLE -// TEST[continued] +// TEST[skip:continued] <1> This script field is a more complicated example of case manipulation. It uses the `subString()` function to capitalize the first letter of a string and converts the remaining characters to lowercase. @@ -384,7 +379,6 @@ has been converted to "Joe": } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform6]] .Example 6: Replacing tokens @@ -405,7 +399,7 @@ POST _xpack/ml/datafeeds/datafeed-test2/_update GET _xpack/ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- // CONSOLE -// TEST[continued] +// TEST[skip:continued] <1> This script field uses regular expressions to replace white space with underscores. @@ -421,7 +415,6 @@ The preview {dfeed} API returns the following results, which show that } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform7]] .Example 7: Regular expression matching and concatenation @@ -442,7 +435,7 @@ POST _xpack/ml/datafeeds/datafeed-test2/_update GET _xpack/ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- // CONSOLE -// TEST[continued] +// TEST[skip:continued] <1> This script field looks for a specific regular expression pattern and emits the matched groups as a concatenated string. If no match is found, it emits an empty string. @@ -459,7 +452,6 @@ The preview {dfeed} API returns the following results, which show that } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform8]] .Example 8: Splitting strings by domain name @@ -509,7 +501,7 @@ PUT _xpack/ml/datafeeds/datafeed-test3 GET _xpack/ml/datafeeds/datafeed-test3/_preview -------------------------------------------------- // CONSOLE -// TEST[skip:broken] +// TEST[skip:needs-licence] If you have a single field that contains a well-formed DNS domain name, you can use the `domainSplit()` function to split the string into its highest registered @@ -537,7 +529,6 @@ The preview {dfeed} API returns the following results, which show that } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform9]] .Example 9: Transforming geo_point data @@ -583,7 +574,7 @@ PUT _xpack/ml/datafeeds/datafeed-test4 GET _xpack/ml/datafeeds/datafeed-test4/_preview -------------------------------------------------- // CONSOLE -// TEST[skip:broken] +// TEST[skip:needs-licence] In {es}, location data can be stored in `geo_point` fields but this data type is not supported natively in {xpackml} analytics. 
This example of a script field @@ -602,4 +593,4 @@ The preview {dfeed} API returns the following results, which show that } ] ---------------------------------- -// TESTRESPONSE + diff --git a/x-pack/docs/en/ml/api-quickref.asciidoc b/x-pack/docs/en/ml/api-quickref.asciidoc deleted file mode 100644 index be74167862e15..0000000000000 --- a/x-pack/docs/en/ml/api-quickref.asciidoc +++ /dev/null @@ -1,102 +0,0 @@ -[role="xpack"] -[[ml-api-quickref]] -== API quick reference - -All {ml} endpoints have the following base: - -[source,js] ----- -/_xpack/ml/ ----- -// NOTCONSOLE - -The main {ml} resources can be accessed with a variety of endpoints: - -* <>: Create and manage {ml} jobs -* <>: Select data from {es} to be analyzed -* <>: Access the results of a {ml} job -* <>: Manage model snapshots -//* <>: Validate subsections of job configurations - -[float] -[[ml-api-jobs]] -=== /anomaly_detectors/ - -* {ref}/ml-put-job.html[PUT /anomaly_detectors/+++]: Create a job -* {ref}/ml-open-job.html[POST /anomaly_detectors//_open]: Open a job -* {ref}/ml-post-data.html[POST /anomaly_detectors//_data]: Send data to a job -* {ref}/ml-get-job.html[GET /anomaly_detectors]: List jobs -* {ref}/ml-get-job.html[GET /anomaly_detectors/+++]: Get job details -* {ref}/ml-get-job-stats.html[GET /anomaly_detectors//_stats]: Get job statistics -* {ref}/ml-update-job.html[POST /anomaly_detectors//_update]: Update certain properties of the job configuration -* {ref}/ml-flush-job.html[POST anomaly_detectors//_flush]: Force a job to analyze buffered data -* {ref}/ml-forecast.html[POST anomaly_detectors//_forecast]: Forecast future job behavior -* {ref}/ml-close-job.html[POST /anomaly_detectors//_close]: Close a job -* {ref}/ml-delete-job.html[DELETE /anomaly_detectors/+++]: Delete a job - -[float] -[[ml-api-calendars]] -=== /calendars/ - -* {ref}/ml-put-calendar.html[PUT /calendars/+++]: Create a calendar -* {ref}/ml-post-calendar-event.html[POST /calendars/+++/events]: Add a scheduled event to a calendar -* {ref}/ml-put-calendar-job.html[PUT /calendars/+++/jobs/+++]: Associate a job with a calendar -* {ref}/ml-get-calendar.html[GET /calendars/+++]: Get calendar details -* {ref}/ml-get-calendar-event.html[GET /calendars/+++/events]: Get scheduled event details -* {ref}/ml-delete-calendar-event.html[DELETE /calendars/+++/events/+++]: Remove a scheduled event from a calendar -* {ref}/ml-delete-calendar-job.html[DELETE /calendars/+++/jobs/+++]: Disassociate a job from a calendar -* {ref}/ml-delete-calendar.html[DELETE /calendars/+++]: Delete a calendar - -[float] -[[ml-api-filters]] -=== /filters/ - -* {ref}/ml-put-filter.html[PUT /filters/+++]: Create a filter -* {ref}/ml-update-filter.html[POST /filters/+++/_update]: Update a filter -* {ref}/ml-get-filter.html[GET /filters/+++]: List filters -* {ref}/ml-delete-filter.html[DELETE /filter/+++]: Delete a filter - -[float] -[[ml-api-datafeeds]] -=== /datafeeds/ - -* {ref}/ml-put-datafeed.html[PUT /datafeeds/+++]: Create a {dfeed} -* {ref}/ml-start-datafeed.html[POST /datafeeds//_start]: Start a {dfeed} -* {ref}/ml-get-datafeed.html[GET /datafeeds]: List {dfeeds} -* {ref}/ml-get-datafeed.html[GET /datafeeds/+++]: Get {dfeed} details -* {ref}/ml-get-datafeed-stats.html[GET /datafeeds//_stats]: Get statistical information for {dfeeds} -* {ref}/ml-preview-datafeed.html[GET /datafeeds//_preview]: Get a preview of a {dfeed} -* {ref}/ml-update-datafeed.html[POST /datafeeds//_update]: Update certain settings for a {dfeed} -* {ref}/ml-stop-datafeed.html[POST /datafeeds//_stop]: Stop a 
{dfeed} -* {ref}/ml-delete-datafeed.html[DELETE /datafeeds/+++]: Delete {dfeed} - -[float] -[[ml-api-results]] -=== /results/ - -* {ref}/ml-get-bucket.html[GET /results/buckets]: List the buckets in the results -* {ref}/ml-get-bucket.html[GET /results/buckets/+++]: Get bucket details -* {ref}/ml-get-overall-buckets.html[GET /results/overall_buckets]: Get overall bucket results for multiple jobs -* {ref}/ml-get-category.html[GET /results/categories]: List the categories in the results -* {ref}/ml-get-category.html[GET /results/categories/+++]: Get category details -* {ref}/ml-get-influencer.html[GET /results/influencers]: Get influencer details -* {ref}/ml-get-record.html[GET /results/records]: Get records from the results - -[float] -[[ml-api-snapshots]] -=== /model_snapshots/ - -* {ref}/ml-get-snapshot.html[GET /model_snapshots]: List model snapshots -* {ref}/ml-get-snapshot.html[GET /model_snapshots/+++]: Get model snapshot details -* {ref}/ml-revert-snapshot.html[POST /model_snapshots//_revert]: Revert a model snapshot -* {ref}/ml-update-snapshot.html[POST /model_snapshots//_update]: Update certain settings for a model snapshot -* {ref}/ml-delete-snapshot.html[DELETE /model_snapshots/+++]: Delete a model snapshot - -//// -[float] -[[ml-api-validate]] -=== /validate/ - -* {ref}/ml-valid-detector.html[POST /anomaly_detectors/_validate/detector]: Validate a detector -* {ref}/ml-valid-job.html[POST /anomaly_detectors/_validate]: Validate a job -//// From e35515396bc543ff8cf764422d59175334a1b984 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Fri, 31 Aug 2018 13:33:14 -0600 Subject: [PATCH 45/52] Mute SmokeTestWatcherWithSecurityIT testsi Tests from the SmokeTestWatcherWithSecurityIT suite have been failing occasionally. This commit mutes all the tests. This is tracked in --- .../elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java index 71dd17f0684b0..b4d60d3708ecc 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java @@ -186,6 +186,7 @@ public void testSearchInputWithInsufficientPrivileges() throws Exception { assertThat(conditionMet, is(false)); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29893") public void testSearchTransformHasPermissions() throws Exception { try (XContentBuilder builder = jsonBuilder()) { builder.startObject(); @@ -268,6 +269,7 @@ public void testIndexActionHasPermissions() throws Exception { assertThat(spam, is("eggs")); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30777") public void testIndexActionInsufficientPrivileges() throws Exception { try (XContentBuilder builder = jsonBuilder()) { builder.startObject(); From cf28ba68022b5aa8952d756224a25031a4c4d679 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Fri, 31 Aug 2018 13:37:22 -0600 Subject: [PATCH 46/52] Fix AwaitsFix issue number In the previous commit where SmokeTestWatcherWithSecurityIT tests were muted, I added the incorrect issue numbers. This commit fixes this. The issue for the tests is #33320. 
--- .../smoketest/SmokeTestWatcherWithSecurityIT.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java index b4d60d3708ecc..17fbf0769fd47 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java @@ -186,7 +186,7 @@ public void testSearchInputWithInsufficientPrivileges() throws Exception { assertThat(conditionMet, is(false)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29893") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33320") public void testSearchTransformHasPermissions() throws Exception { try (XContentBuilder builder = jsonBuilder()) { builder.startObject(); @@ -269,7 +269,7 @@ public void testIndexActionHasPermissions() throws Exception { assertThat(spam, is("eggs")); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30777") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33320") public void testIndexActionInsufficientPrivileges() throws Exception { try (XContentBuilder builder = jsonBuilder()) { builder.startObject(); From 6de8c6b79d206ecba097a23ecab85753010f551f Mon Sep 17 00:00:00 2001 From: Vladimir Dolzhenko Date: Fri, 31 Aug 2018 21:29:06 +0200 Subject: [PATCH 47/52] drop `index.shard.check_on_startup: fix` (#32279) Relates #31389 (cherry picked from commit 3d82a30fadd035228f29ccc00d6c5bab71e9adf6) --- docs/reference/index-modules.asciidoc | 4 +- .../elasticsearch/index/shard/IndexShard.java | 22 +-- .../org/elasticsearch/index/store/Store.java | 15 +- .../index/shard/IndexShardTests.java | 157 +++++++++++++++++- .../index/shard/IndexShardTestCase.java | 107 +++++++++--- 5 files changed, 249 insertions(+), 56 deletions(-) diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index de57db1f89742..001996de398ce 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -65,9 +65,7 @@ corruption is detected, it will prevent the shard from being opened. Accepts: `fix`:: - Check for both physical and logical corruption. Segments that were reported - as corrupted will be automatically removed. This option *may result in data loss*. - Use with extreme caution! + The same as `false`. This option is deprecated and will be completely removed in 7.0. WARNING: Expert only. Checking shards may take a lot of time on large indices. 
-- diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 378df05208c6f..6db5c21f56626 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -297,6 +297,10 @@ public IndexShard( logger.debug("state: [CREATED]"); this.checkIndexOnStartup = indexSettings.getValue(IndexSettings.INDEX_CHECK_ON_STARTUP); + if ("fix".equals(checkIndexOnStartup)) { + deprecationLogger.deprecated("Setting [index.shard.check_on_startup] is set to deprecated value [fix], " + + "which has no effect and will not be accepted in future"); + } this.translogConfig = new TranslogConfig(shardId, shardPath().resolveTranslog(), indexSettings, bigArrays); final String aId = shardRouting.allocationId().getId(); this.globalCheckpointListeners = new GlobalCheckpointListeners(shardId, threadPool.executor(ThreadPool.Names.LISTENER), logger); @@ -1354,7 +1358,7 @@ private void innerOpenEngineAndTranslog() throws IOException { } recoveryState.setStage(RecoveryState.Stage.VERIFY_INDEX); // also check here, before we apply the translog - if (Booleans.isTrue(checkIndexOnStartup)) { + if (Booleans.isTrue(checkIndexOnStartup) || "checksum".equals(checkIndexOnStartup)) { try { checkIndex(); } catch (IOException ex) { @@ -1958,6 +1962,9 @@ void checkIndex() throws IOException { if (store.tryIncRef()) { try { doCheckIndex(); + } catch (IOException e) { + store.markStoreCorrupted(e); + throw e; } finally { store.decRef(); } @@ -2001,18 +2008,7 @@ private void doCheckIndex() throws IOException { return; } logger.warn("check index [failure]\n{}", os.bytes().utf8ToString()); - if ("fix".equals(checkIndexOnStartup)) { - if (logger.isDebugEnabled()) { - logger.debug("fixing index, writing new segments file ..."); - } - store.exorciseIndex(status); - if (logger.isDebugEnabled()) { - logger.debug("index fixed, wrote new segments file \"{}\"", status.segmentsFileName); - } - } else { - // only throw a failure if we are not going to fix the index - throw new IllegalStateException("index check failure but can't fix it"); - } + throw new IOException("index check failure"); } } diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index 03b641b69ce57..fa1534ef922c7 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -134,7 +134,8 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref static final int VERSION_STACK_TRACE = 1; // we write the stack trace too since 1.4.0 static final int VERSION_START = 0; static final int VERSION = VERSION_WRITE_THROWABLE; - static final String CORRUPTED = "corrupted_"; + // public is for test purposes + public static final String CORRUPTED = "corrupted_"; public static final Setting INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING = Setting.timeSetting("index.store.stats_refresh_interval", TimeValue.timeValueSeconds(10), Property.IndexScope); @@ -360,18 +361,6 @@ public CheckIndex.Status checkIndex(PrintStream out) throws IOException { } } - /** - * Repairs the index using the previous returned status from {@link #checkIndex(PrintStream)}. 
- */ - public void exorciseIndex(CheckIndex.Status status) throws IOException { - metadataLock.writeLock().lock(); - try (CheckIndex checkIndex = new CheckIndex(directory)) { - checkIndex.exorciseIndex(status); - } finally { - metadataLock.writeLock().unlock(); - } - } - public StoreStats stats() throws IOException { ensureOpen(); return new StoreStats(directory.estimateSize()); diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 1cb9cb7c3d47f..83d930c3fdba8 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -22,6 +22,7 @@ import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.TermQuery; @@ -110,6 +111,7 @@ import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotShardFailure; +import org.elasticsearch.test.CorruptionUtils; import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.FieldMaskingReader; import org.elasticsearch.test.VersionUtils; @@ -118,7 +120,11 @@ import java.io.IOException; import java.nio.charset.Charset; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -1229,7 +1235,7 @@ public String[] listAll() throws IOException { }; try (Store store = createStore(shardId, new IndexSettings(metaData, Settings.EMPTY), directory)) { - IndexShard shard = newShard(shardRouting, shardPath, metaData, store, + IndexShard shard = newShard(shardRouting, shardPath, metaData, i -> store, null, new InternalEngineFactory(), () -> { }, EMPTY_EVENT_LISTENER); AtomicBoolean failureCallbackTriggered = new AtomicBoolean(false); @@ -2569,6 +2575,143 @@ public void testReadSnapshotConcurrently() throws IOException, InterruptedExcept closeShards(newShard); } + public void testIndexCheckOnStartup() throws Exception { + final IndexShard indexShard = newStartedShard(true); + + final long numDocs = between(10, 100); + for (long i = 0; i < numDocs; i++) { + indexDoc(indexShard, "_doc", Long.toString(i), "{}"); + } + indexShard.flush(new FlushRequest()); + closeShards(indexShard); + + final ShardPath shardPath = indexShard.shardPath(); + + final Path indexPath = corruptIndexFile(shardPath); + + final AtomicInteger corruptedMarkerCount = new AtomicInteger(); + final SimpleFileVisitor corruptedVisitor = new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + if (Files.isRegularFile(file) && file.getFileName().toString().startsWith(Store.CORRUPTED)) { + corruptedMarkerCount.incrementAndGet(); + } + return FileVisitResult.CONTINUE; + } + }; + Files.walkFileTree(indexPath, corruptedVisitor); + + assertThat("corruption marker should not be there", corruptedMarkerCount.get(), equalTo(0)); + + final ShardRouting shardRouting = ShardRoutingHelper.initWithSameId(indexShard.routingEntry(), + RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE + ); 
+ // start shard and perform index check on startup. It enforce shard to fail due to corrupted index files + final IndexMetaData indexMetaData = IndexMetaData.builder(indexShard.indexSettings().getIndexMetaData()) + .settings(Settings.builder() + .put(indexShard.indexSettings.getSettings()) + .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), randomFrom("true", "checksum"))) + .build(); + + IndexShard corruptedShard = newShard(shardRouting, shardPath, indexMetaData, + null, null, indexShard.engineFactory, + indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER); + + final IndexShardRecoveryException indexShardRecoveryException = + expectThrows(IndexShardRecoveryException.class, () -> newStartedShard(p -> corruptedShard, true)); + assertThat(indexShardRecoveryException.getMessage(), equalTo("failed recovery")); + + // check that corrupt marker is there + Files.walkFileTree(indexPath, corruptedVisitor); + assertThat("store has to be marked as corrupted", corruptedMarkerCount.get(), equalTo(1)); + + try { + closeShards(corruptedShard); + } catch (RuntimeException e) { + assertThat(e.getMessage(), equalTo("CheckIndex failed")); + } + } + + public void testShardDoesNotStartIfCorruptedMarkerIsPresent() throws Exception { + final IndexShard indexShard = newStartedShard(true); + + final long numDocs = between(10, 100); + for (long i = 0; i < numDocs; i++) { + indexDoc(indexShard, "_doc", Long.toString(i), "{}"); + } + indexShard.flush(new FlushRequest()); + closeShards(indexShard); + + final ShardPath shardPath = indexShard.shardPath(); + + final ShardRouting shardRouting = ShardRoutingHelper.initWithSameId(indexShard.routingEntry(), + RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE + ); + final IndexMetaData indexMetaData = indexShard.indexSettings().getIndexMetaData(); + + final Path indexPath = shardPath.getDataPath().resolve(ShardPath.INDEX_FOLDER_NAME); + + // create corrupted marker + final String corruptionMessage = "fake ioexception"; + try(Store store = createStore(indexShard.indexSettings(), shardPath)) { + store.markStoreCorrupted(new IOException(corruptionMessage)); + } + + // try to start shard on corrupted files + final IndexShard corruptedShard = newShard(shardRouting, shardPath, indexMetaData, + null, null, indexShard.engineFactory, + indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER); + + final IndexShardRecoveryException exception1 = expectThrows(IndexShardRecoveryException.class, + () -> newStartedShard(p -> corruptedShard, true)); + assertThat(exception1.getCause().getMessage(), equalTo(corruptionMessage + " (resource=preexisting_corruption)")); + closeShards(corruptedShard); + + final AtomicInteger corruptedMarkerCount = new AtomicInteger(); + final SimpleFileVisitor corruptedVisitor = new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + if (Files.isRegularFile(file) && file.getFileName().toString().startsWith(Store.CORRUPTED)) { + corruptedMarkerCount.incrementAndGet(); + } + return FileVisitResult.CONTINUE; + } + }; + Files.walkFileTree(indexPath, corruptedVisitor); + assertThat("store has to be marked as corrupted", corruptedMarkerCount.get(), equalTo(1)); + + // try to start another time shard on corrupted files + final IndexShard corruptedShard2 = newShard(shardRouting, shardPath, indexMetaData, + null, null, indexShard.engineFactory, + indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER); + + final IndexShardRecoveryException exception2 = 
expectThrows(IndexShardRecoveryException.class, + () -> newStartedShard(p -> corruptedShard2, true)); + assertThat(exception2.getCause().getMessage(), equalTo(corruptionMessage + " (resource=preexisting_corruption)")); + closeShards(corruptedShard2); + + // check that corrupt marker is there + corruptedMarkerCount.set(0); + Files.walkFileTree(indexPath, corruptedVisitor); + assertThat("store still has a single corrupt marker", corruptedMarkerCount.get(), equalTo(1)); + } + + private Path corruptIndexFile(ShardPath shardPath) throws IOException { + final Path indexPath = shardPath.getDataPath().resolve(ShardPath.INDEX_FOLDER_NAME); + final Path[] filesToCorrupt = + Files.walk(indexPath) + .filter(p -> { + final String name = p.getFileName().toString(); + return Files.isRegularFile(p) + && name.startsWith("extra") == false // Skip files added by Lucene's ExtrasFS + && IndexWriter.WRITE_LOCK_NAME.equals(name) == false + && name.startsWith("segments_") == false && name.endsWith(".si") == false; + }) + .toArray(Path[]::new); + CorruptionUtils.corruptFile(random(), filesToCorrupt); + return indexPath; + } + /** * Simulates a scenario that happens when we are async fetching snapshot metadata from GatewayService * and checking index concurrently. This should always be possible without any exception. @@ -2592,7 +2735,7 @@ public void testReadSnapshotAndCheckIndexConcurrently() throws Exception { final IndexMetaData indexMetaData = IndexMetaData.builder(indexShard.indexSettings().getIndexMetaData()) .settings(Settings.builder() .put(indexShard.indexSettings.getSettings()) - .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), randomFrom("false", "true", "checksum", "fix"))) + .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), randomFrom("false", "true", "checksum"))) .build(); final IndexShard newShard = newShard(shardRouting, indexShard.shardPath(), indexMetaData, null, null, indexShard.engineFactory, indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER); @@ -2634,6 +2777,16 @@ public void testReadSnapshotAndCheckIndexConcurrently() throws Exception { closeShards(newShard); } + public void testCheckOnStartupDeprecatedValue() throws Exception { + final Settings settings = Settings.builder().put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), "fix").build(); + + final IndexShard newShard = newShard(true, settings); + closeShards(newShard); + + assertWarnings("Setting [index.shard.check_on_startup] is set to deprecated value [fix], " + + "which has no effect and will not be accepted in future"); + } + class Result { private final int localCheckpoint; private final int maxSeqNo; diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 82ee78cfee1c7..56c78930244e7 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -37,6 +37,7 @@ import org.elasticsearch.cluster.routing.ShardRoutingHelper; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.lucene.uid.Versions; @@ -163,7 +164,6 @@ public Settings threadPoolSettings() { return Settings.EMPTY; } - protected Store createStore(IndexSettings 
indexSettings, ShardPath shardPath) throws IOException { return createStore(shardPath.getShardId(), indexSettings, newFSDirectory(shardPath.resolveIndex())); } @@ -176,7 +176,6 @@ public Directory newDirectory() throws IOException { } }; return new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId)); - } /** @@ -186,31 +185,64 @@ public Directory newDirectory() throws IOException { * (ready to recover from another shard) */ protected IndexShard newShard(boolean primary) throws IOException { - ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId("index", "_na_", 0), randomAlphaOfLength(10), primary, - ShardRoutingState.INITIALIZING, - primary ? RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE); - return newShard(shardRouting); + return newShard(primary, Settings.EMPTY); } /** - * creates a new initializing shard. The shard will have its own unique data path. + * Creates a new initializing shard. The shard will have its own unique data path. * - * @param shardRouting the {@link ShardRouting} to use for this shard - * @param listeners an optional set of listeners to add to the shard + * @param primary indicates whether to a primary shard (ready to recover from an empty store) or a replica (ready to recover from + * another shard) + */ + protected IndexShard newShard(final boolean primary, final Settings settings) throws IOException { + return newShard(primary, settings, new InternalEngineFactory()); + } + + /** + * Creates a new initializing shard. The shard will have its own unique data path. + * + * @param primary indicates whether to a primary shard (ready to recover from an empty store) or a replica (ready to recover from + * another shard) + * @param settings the settings to use for this shard + * @param engineFactory the engine factory to use for this shard + */ + protected IndexShard newShard(boolean primary, Settings settings, EngineFactory engineFactory) throws IOException { + final RecoverySource recoverySource = + primary ? RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE; + final ShardRouting shardRouting = + TestShardRouting.newShardRouting( + new ShardId("index", "_na_", 0), randomAlphaOfLength(10), primary, ShardRoutingState.INITIALIZING, recoverySource); + return newShard(shardRouting, settings, engineFactory); + } + + protected IndexShard newShard(ShardRouting shardRouting, final IndexingOperationListener... listeners) throws IOException { + return newShard(shardRouting, Settings.EMPTY, new InternalEngineFactory(), listeners); + } + + /** + * Creates a new initializing shard. The shard will have its own unique data path. + * + * @param shardRouting the {@link ShardRouting} to use for this shard + * @param settings the settings to use for this shard + * @param engineFactory the engine factory to use for this shard + * @param listeners an optional set of listeners to add to the shard */ protected IndexShard newShard( final ShardRouting shardRouting, + final Settings settings, + final EngineFactory engineFactory, final IndexingOperationListener... 
listeners) throws IOException { assert shardRouting.initializing() : shardRouting; - Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .build(); + Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(settings) + .build(); IndexMetaData.Builder metaData = IndexMetaData.builder(shardRouting.getIndexName()) - .settings(settings) + .settings(indexSettings) .primaryTerm(0, primaryTerm) .putMapping("_doc", "{ \"properties\": {} }"); - return newShard(shardRouting, metaData.build(), listeners); + return newShard(shardRouting, metaData.build(), engineFactory, listeners); } /** @@ -225,7 +257,7 @@ protected IndexShard newShard(ShardId shardId, boolean primary, IndexingOperatio ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, randomAlphaOfLength(5), primary, ShardRoutingState.INITIALIZING, primary ? RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE); - return newShard(shardRouting, listeners); + return newShard(shardRouting, Settings.EMPTY, new InternalEngineFactory(), listeners); } /** @@ -265,9 +297,10 @@ protected IndexShard newShard(ShardId shardId, boolean primary, String nodeId, I * @param indexMetaData indexMetaData for the shard, including any mapping * @param listeners an optional set of listeners to add to the shard */ - protected IndexShard newShard(ShardRouting routing, IndexMetaData indexMetaData, IndexingOperationListener... listeners) + protected IndexShard newShard( + ShardRouting routing, IndexMetaData indexMetaData, EngineFactory engineFactory, IndexingOperationListener... listeners) throws IOException { - return newShard(routing, indexMetaData, null, new InternalEngineFactory(), () -> {}, listeners); + return newShard(routing, indexMetaData, null, engineFactory, () -> {}, listeners); } /** @@ -298,23 +331,25 @@ protected IndexShard newShard(ShardRouting routing, IndexMetaData indexMetaData, * @param routing shard routing to use * @param shardPath path to use for shard data * @param indexMetaData indexMetaData for the shard, including any mapping - * @param store an optional custom store to use. If null a default file based store will be created + * @param storeProvider an optional custom store provider to use. If null a default file based store will be created * @param indexSearcherWrapper an optional wrapper to be used during searchers * @param globalCheckpointSyncer callback for syncing global checkpoints * @param indexEventListener index event listener * @param listeners an optional set of listeners to add to the shard */ protected IndexShard newShard(ShardRouting routing, ShardPath shardPath, IndexMetaData indexMetaData, - @Nullable Store store, @Nullable IndexSearcherWrapper indexSearcherWrapper, + @Nullable CheckedFunction storeProvider, + @Nullable IndexSearcherWrapper indexSearcherWrapper, @Nullable EngineFactory engineFactory, Runnable globalCheckpointSyncer, IndexEventListener indexEventListener, IndexingOperationListener... 
listeners) throws IOException { final Settings nodeSettings = Settings.builder().put("node.name", routing.currentNodeId()).build(); final IndexSettings indexSettings = new IndexSettings(indexMetaData, nodeSettings); final IndexShard indexShard; - if (store == null) { - store = createStore(indexSettings, shardPath); + if (storeProvider == null) { + storeProvider = is -> createStore(is, shardPath); } + final Store store = storeProvider.apply(indexSettings); boolean success = false; try { IndexCache indexCache = new IndexCache(indexSettings, new DisabledQueryCache(indexSettings), null); @@ -372,7 +407,7 @@ protected IndexShard reinitShard(IndexShard current, ShardRouting routing, Index } /** - * creates a new empyu shard and starts it. The shard will be either a replica or a primary. + * Creates a new empty shard and starts it. The shard will randomly be a replica or a primary. */ protected IndexShard newStartedShard() throws IOException { return newStartedShard(randomBoolean()); @@ -383,8 +418,30 @@ protected IndexShard newStartedShard() throws IOException { * * @param primary controls whether the shard will be a primary or a replica. */ - protected IndexShard newStartedShard(boolean primary) throws IOException { - IndexShard shard = newShard(primary); + protected IndexShard newStartedShard(final boolean primary) throws IOException { + return newStartedShard(primary, Settings.EMPTY, new InternalEngineFactory()); + } + /** + * Creates a new empty shard with the specified settings and engine factory and starts it. + * + * @param primary controls whether the shard will be a primary or a replica. + * @param settings the settings to use for this shard + * @param engineFactory the engine factory to use for this shard + */ + protected IndexShard newStartedShard( + final boolean primary, final Settings settings, final EngineFactory engineFactory) throws IOException { + return newStartedShard(p -> newShard(p, settings, engineFactory), primary); + } + + /** + * creates a new empty shard and starts it. + * + * @param shardFunction shard factory function + * @param primary controls whether the shard will be a primary or a replica. + */ + protected IndexShard newStartedShard(CheckedFunction shardFunction, + boolean primary) throws IOException { + IndexShard shard = shardFunction.apply(primary); if (primary) { recoverShardFromStore(shard); } else { From 8853691310f200fdcc9714a13b44944776098a47 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 30 Aug 2018 22:11:23 -0400 Subject: [PATCH 48/52] Integrates soft-deletes into Elasticsearch (#33222) This PR integrates Lucene soft-deletes(LUCENE-8200) into Elasticsearch. 
Highlights of the work in this PR include: - Replace hard-deletes by soft-deletes in InternalEngine - Use _recovery_source if _source is disabled or modified (#31106) - Soft-deletes retention policy based on the global checkpoint (#30335) - Read operation history from Lucene instead of translog (#30120) - Use Lucene history in peer-recovery (#30522) Relates #30086 Closes #29530 --- This work has been done by the whole team; however, the following individuals (in lexical order) made significant contributions in coding and reviewing: Co-authored-by: Adrien Grand Co-authored-by: Boaz Leskes Co-authored-by: Jason Tedor Co-authored-by: Martijn van Groningen Co-authored-by: Nhat Nguyen Co-authored-by: Simon Willnauer --- .../percolator/CandidateQueryTests.java | 8 +- .../PercolatorFieldMapperTests.java | 30 +- .../elasticsearch/common/lucene/Lucene.java | 86 ++- .../uid/PerThreadIDVersionAndSeqNoLookup.java | 21 +- .../common/settings/IndexScopedSettings.java | 2 + .../elasticsearch/index/IndexSettings.java | 38 ++ .../index/engine/CombinedDeletionPolicy.java | 12 +- .../elasticsearch/index/engine/Engine.java | 28 +- .../index/engine/EngineConfig.java | 27 +- .../index/engine/InternalEngine.java | 418 +++++++++--- .../index/engine/LuceneChangesSnapshot.java | 369 ++++++++++ .../RecoverySourcePruneMergePolicy.java | 292 ++++++++ .../index/engine/SoftDeletesPolicy.java | 120 ++++ .../index/fieldvisitor/FieldsVisitor.java | 10 +- .../index/mapper/DocumentMapper.java | 34 +- .../index/mapper/DocumentParser.java | 33 +- .../index/mapper/FieldNamesFieldMapper.java | 5 +- .../index/mapper/ParseContext.java | 18 +- .../index/mapper/ParsedDocument.java | 11 + .../index/mapper/SeqNoFieldMapper.java | 7 +- .../index/mapper/SourceFieldMapper.java | 16 +- .../elasticsearch/index/shard/IndexShard.java | 47 +- .../index/shard/PrimaryReplicaSyncer.java | 2 +- .../index/shard/StoreRecovery.java | 1 + .../org/elasticsearch/index/store/Store.java | 2 +- .../index/translog/Translog.java | 3 + .../translog/TruncateTranslogCommand.java | 2 + .../recovery/RecoverySourceHandler.java | 59 +- .../blobstore/BlobStoreRepository.java | 1 + .../snapshots/RestoreService.java | 4 +- .../cluster/routing/PrimaryAllocationIT.java | 1 + .../common/lucene/LuceneTests.java | 91 +++ .../discovery/AbstractDisruptionTestCase.java | 1 + .../gateway/RecoveryFromGatewayIT.java | 13 +- .../index/IndexServiceTests.java | 3 +- .../index/IndexSettingsTests.java | 8 + .../engine/CombinedDeletionPolicyTests.java | 77 ++- .../index/engine/InternalEngineTests.java | 630 ++++++++++++------ .../engine/LuceneChangesSnapshotTests.java | 289 ++++++++ .../RecoverySourcePruneMergePolicyTests.java | 161 +++++ .../index/engine/SoftDeletesPolicyTests.java | 75 +++ .../index/mapper/DocumentParserTests.java | 10 +- .../index/mapper/DynamicMappingTests.java | 6 +- .../IndexLevelReplicationTests.java | 29 +- .../RecoveryDuringReplicationTests.java | 11 +- .../index/shard/IndexShardTests.java | 58 +- .../shard/PrimaryReplicaSyncerTests.java | 21 +- .../index/shard/RefreshListenersTests.java | 4 +- .../indices/recovery/IndexRecoveryIT.java | 6 + .../PeerRecoveryTargetServiceTests.java | 2 + .../recovery/RecoverySourceHandlerTests.java | 6 - .../indices/recovery/RecoveryTests.java | 82 ++- .../indices/stats/IndexStatsIT.java | 37 +- .../AbstractSnapshotIntegTestCase.java | 6 + .../SharedClusterSnapshotRestoreIT.java | 13 +- .../versioning/SimpleVersioningIT.java | 23 + .../index/engine/EngineTestCase.java | 412 +++++++++++- .../ESIndexLevelReplicationTestCase.java | 27 +-
.../index/shard/IndexShardTestCase.java | 52 +- .../elasticsearch/test/ESIntegTestCase.java | 4 + .../test/ESSingleNodeTestCase.java | 9 + .../test/InternalTestCluster.java | 20 + 62 files changed, 3392 insertions(+), 501 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java create mode 100644 server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java create mode 100644 server/src/main/java/org/elasticsearch/index/engine/SoftDeletesPolicy.java create mode 100644 server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java create mode 100644 server/src/test/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicyTests.java create mode 100644 server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java index 90370b2f6ff9b..d8165bff531e2 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java @@ -77,6 +77,7 @@ import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -87,6 +88,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; @@ -1109,7 +1111,11 @@ private void duelRun(PercolateQuery.QueryStore queryStore, MemoryIndex memoryInd } private void addQuery(Query query, List docs) { - ParseContext.InternalParseContext parseContext = new ParseContext.InternalParseContext(Settings.EMPTY, + IndexMetaData build = IndexMetaData.builder("") + .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(1).numberOfReplicas(0).build(); + IndexSettings settings = new IndexSettings(build, Settings.EMPTY); + ParseContext.InternalParseContext parseContext = new ParseContext.InternalParseContext(settings, mapperService.documentMapperParser(), documentMapper, null, null); fieldMapper.processQuery(query, parseContext); ParseContext.Document queryDocument = parseContext.doc(); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java index e47c90361cb9f..b3b6cd5f7bd0a 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java @@ -42,6 +42,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import 
org.elasticsearch.common.bytes.BytesReference; @@ -58,6 +59,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.MapperParsingException; @@ -182,7 +184,11 @@ public void testExtractTerms() throws Exception { DocumentMapper documentMapper = mapperService.documentMapper("doc"); PercolatorFieldMapper fieldMapper = (PercolatorFieldMapper) documentMapper.mappers().getMapper(fieldName); - ParseContext.InternalParseContext parseContext = new ParseContext.InternalParseContext(Settings.EMPTY, + IndexMetaData build = IndexMetaData.builder("") + .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(1).numberOfReplicas(0).build(); + IndexSettings settings = new IndexSettings(build, Settings.EMPTY); + ParseContext.InternalParseContext parseContext = new ParseContext.InternalParseContext(settings, mapperService.documentMapperParser(), documentMapper, null, null); fieldMapper.processQuery(bq.build(), parseContext); ParseContext.Document document = parseContext.doc(); @@ -204,7 +210,7 @@ public void testExtractTerms() throws Exception { bq.add(termQuery1, Occur.MUST); bq.add(termQuery2, Occur.MUST); - parseContext = new ParseContext.InternalParseContext(Settings.EMPTY, mapperService.documentMapperParser(), + parseContext = new ParseContext.InternalParseContext(settings, mapperService.documentMapperParser(), documentMapper, null, null); fieldMapper.processQuery(bq.build(), parseContext); document = parseContext.doc(); @@ -232,8 +238,12 @@ public void testExtractRanges() throws Exception { bq.add(rangeQuery2, Occur.MUST); DocumentMapper documentMapper = mapperService.documentMapper("doc"); + IndexMetaData build = IndexMetaData.builder("") + .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(1).numberOfReplicas(0).build(); + IndexSettings settings = new IndexSettings(build, Settings.EMPTY); PercolatorFieldMapper fieldMapper = (PercolatorFieldMapper) documentMapper.mappers().getMapper(fieldName); - ParseContext.InternalParseContext parseContext = new ParseContext.InternalParseContext(Settings.EMPTY, + ParseContext.InternalParseContext parseContext = new ParseContext.InternalParseContext(settings, mapperService.documentMapperParser(), documentMapper, null, null); fieldMapper.processQuery(bq.build(), parseContext); ParseContext.Document document = parseContext.doc(); @@ -259,7 +269,7 @@ public void testExtractRanges() throws Exception { .rangeQuery(15, 20, true, true, null, null, null, null); bq.add(rangeQuery2, Occur.MUST); - parseContext = new ParseContext.InternalParseContext(Settings.EMPTY, + parseContext = new ParseContext.InternalParseContext(settings, mapperService.documentMapperParser(), documentMapper, null, null); fieldMapper.processQuery(bq.build(), parseContext); document = parseContext.doc(); @@ -283,7 +293,11 @@ public void testExtractTermsAndRanges_failed() throws Exception { TermRangeQuery query = new TermRangeQuery("field1", new BytesRef("a"), new BytesRef("z"), true, true); DocumentMapper documentMapper = mapperService.documentMapper("doc"); PercolatorFieldMapper fieldMapper = (PercolatorFieldMapper) documentMapper.mappers().getMapper(fieldName); - ParseContext.InternalParseContext 
parseContext = new ParseContext.InternalParseContext(Settings.EMPTY, + IndexMetaData build = IndexMetaData.builder("") + .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(1).numberOfReplicas(0).build(); + IndexSettings settings = new IndexSettings(build, Settings.EMPTY); + ParseContext.InternalParseContext parseContext = new ParseContext.InternalParseContext(settings, mapperService.documentMapperParser(), documentMapper, null, null); fieldMapper.processQuery(query, parseContext); ParseContext.Document document = parseContext.doc(); @@ -298,7 +312,11 @@ public void testExtractTermsAndRanges_partial() throws Exception { PhraseQuery phraseQuery = new PhraseQuery("field", "term"); DocumentMapper documentMapper = mapperService.documentMapper("doc"); PercolatorFieldMapper fieldMapper = (PercolatorFieldMapper) documentMapper.mappers().getMapper(fieldName); - ParseContext.InternalParseContext parseContext = new ParseContext.InternalParseContext(Settings.EMPTY, + IndexMetaData build = IndexMetaData.builder("") + .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(1).numberOfReplicas(0).build(); + IndexSettings settings = new IndexSettings(build, Settings.EMPTY); + ParseContext.InternalParseContext parseContext = new ParseContext.InternalParseContext(settings, mapperService.documentMapperParser(), documentMapper, null, null); fieldMapper.processQuery(phraseQuery, parseContext); ParseContext.Document document = parseContext.doc(); diff --git a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java index ebd0d5ba2efba..38ab4bb5bb35c 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -27,8 +27,10 @@ import org.apache.lucene.codecs.DocValuesFormat; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.document.LatLonDocValuesField; +import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.FilterDirectoryReader; import org.apache.lucene.index.FilterLeafReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexFileNames; @@ -96,6 +98,8 @@ public class Lucene { assert annotation == null : "DocValuesFormat " + LATEST_DOC_VALUES_FORMAT + " is deprecated" ; } + public static final String SOFT_DELETES_FIELD = "__soft_deletes"; + public static final NamedAnalyzer STANDARD_ANALYZER = new NamedAnalyzer("_standard", AnalyzerScope.GLOBAL, new StandardAnalyzer()); public static final NamedAnalyzer KEYWORD_ANALYZER = new NamedAnalyzer("_keyword", AnalyzerScope.GLOBAL, new KeywordAnalyzer()); @@ -140,7 +144,7 @@ public static Iterable files(SegmentInfos infos) throws IOException { public static int getNumDocs(SegmentInfos info) { int numDocs = 0; for (SegmentCommitInfo si : info) { - numDocs += si.info.maxDoc() - si.getDelCount(); + numDocs += si.info.maxDoc() - si.getDelCount() - si.getSoftDelCount(); } return numDocs; } @@ -197,6 +201,7 @@ public static SegmentInfos pruneUnreferencedFiles(String segmentsFileName, Direc } final CommitPoint cp = new CommitPoint(si, directory); try (IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(Lucene.STANDARD_ANALYZER) + .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) .setIndexCommit(cp) 
.setCommitOnClose(false) .setMergePolicy(NoMergePolicy.INSTANCE) @@ -220,6 +225,7 @@ public static void cleanLuceneIndex(Directory directory) throws IOException { } } try (IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(Lucene.STANDARD_ANALYZER) + .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) .setMergePolicy(NoMergePolicy.INSTANCE) // no merges .setCommitOnClose(false) // no commits .setOpenMode(IndexWriterConfig.OpenMode.CREATE))) // force creation - don't append... @@ -829,4 +835,82 @@ public int length() { } }; } + + /** + * Wraps a directory reader to make all documents live except those were rolled back + * or hard-deleted due to non-aborting exceptions during indexing. + * The wrapped reader can be used to query all documents. + * + * @param in the input directory reader + * @return the wrapped reader + */ + public static DirectoryReader wrapAllDocsLive(DirectoryReader in) throws IOException { + return new DirectoryReaderWithAllLiveDocs(in); + } + + private static final class DirectoryReaderWithAllLiveDocs extends FilterDirectoryReader { + static final class LeafReaderWithLiveDocs extends FilterLeafReader { + final Bits liveDocs; + final int numDocs; + LeafReaderWithLiveDocs(LeafReader in, Bits liveDocs, int numDocs) { + super(in); + this.liveDocs = liveDocs; + this.numDocs = numDocs; + } + @Override + public Bits getLiveDocs() { + return liveDocs; + } + @Override + public int numDocs() { + return numDocs; + } + @Override + public CacheHelper getCoreCacheHelper() { + return in.getCoreCacheHelper(); + } + @Override + public CacheHelper getReaderCacheHelper() { + return null; // Modifying liveDocs + } + } + + DirectoryReaderWithAllLiveDocs(DirectoryReader in) throws IOException { + super(in, new SubReaderWrapper() { + @Override + public LeafReader wrap(LeafReader leaf) { + SegmentReader segmentReader = segmentReader(leaf); + Bits hardLiveDocs = segmentReader.getHardLiveDocs(); + if (hardLiveDocs == null) { + return new LeafReaderWithLiveDocs(leaf, null, leaf.maxDoc()); + } + // TODO: Can we avoid calculate numDocs by using SegmentReader#getSegmentInfo with LUCENE-8458? + int numDocs = 0; + for (int i = 0; i < hardLiveDocs.length(); i++) { + if (hardLiveDocs.get(i)) { + numDocs++; + } + } + return new LeafReaderWithLiveDocs(segmentReader, hardLiveDocs, numDocs); + } + }); + } + + @Override + protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException { + return wrapAllDocsLive(in); + } + + @Override + public CacheHelper getReaderCacheHelper() { + return null; // Modifying liveDocs + } + } + + /** + * Returns a numeric docvalues which can be used to soft-delete documents. 
+ */ + public static NumericDocValuesField newSoftDeletesField() { + return new NumericDocValuesField(SOFT_DELETES_FIELD, 1); + } } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java b/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java index 38fcdfe5f1b62..3a037bed62b7f 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java @@ -28,6 +28,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndSeqNo; import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion; import org.elasticsearch.index.mapper.SeqNoFieldMapper; @@ -66,15 +67,22 @@ final class PerThreadIDVersionAndSeqNoLookup { */ PerThreadIDVersionAndSeqNoLookup(LeafReader reader, String uidField) throws IOException { this.uidField = uidField; - Terms terms = reader.terms(uidField); + final Terms terms = reader.terms(uidField); if (terms == null) { - throw new IllegalArgumentException("reader misses the [" + uidField + "] field"); + // If a segment contains only no-ops, it does not have _uid but has both _soft_deletes and _tombstone fields. + final NumericDocValues softDeletesDV = reader.getNumericDocValues(Lucene.SOFT_DELETES_FIELD); + final NumericDocValues tombstoneDV = reader.getNumericDocValues(SeqNoFieldMapper.TOMBSTONE_NAME); + if (softDeletesDV == null || tombstoneDV == null) { + throw new IllegalArgumentException("reader does not have _uid terms but not a no-op segment; " + + "_soft_deletes [" + softDeletesDV + "], _tombstone [" + tombstoneDV + "]"); + } + termsEnum = null; + } else { + termsEnum = terms.iterator(); } - termsEnum = terms.iterator(); if (reader.getNumericDocValues(VersionFieldMapper.NAME) == null) { - throw new IllegalArgumentException("reader misses the [" + VersionFieldMapper.NAME + "] field"); + throw new IllegalArgumentException("reader misses the [" + VersionFieldMapper.NAME + "] field; _uid terms [" + terms + "]"); } - Object readerKey = null; assert (readerKey = reader.getCoreCacheHelper().getKey()) != null; this.readerKey = readerKey; @@ -111,7 +119,8 @@ public DocIdAndVersion lookupVersion(BytesRef id, LeafReaderContext context) * {@link DocIdSetIterator#NO_MORE_DOCS} is returned if not found * */ private int getDocID(BytesRef id, Bits liveDocs) throws IOException { - if (termsEnum.seekExact(id)) { + // termsEnum can possibly be null here if this leaf contains only no-ops. 
+ if (termsEnum != null && termsEnum.seekExact(id)) { int docID = DocIdSetIterator.NO_MORE_DOCS; // there may be more than one matching docID, in the case of nested docs, so we want the last one: docsEnum = termsEnum.postings(docsEnum, 0); diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index df7c670fb2c9a..0ab765099272a 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -129,6 +129,8 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.MAX_REGEX_LENGTH_SETTING, ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING, IndexSettings.INDEX_GC_DELETES_SETTING, + IndexSettings.INDEX_SOFT_DELETES_SETTING, + IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING, IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING, UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING, diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index fc0e586c996f6..4ced4a98db09b 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -243,6 +243,21 @@ public final class IndexSettings { public static final Setting INDEX_GC_DELETES_SETTING = Setting.timeSetting("index.gc_deletes", DEFAULT_GC_DELETES, new TimeValue(-1, TimeUnit.MILLISECONDS), Property.Dynamic, Property.IndexScope); + + /** + * Specifies if the index should use soft-delete instead of hard-delete for update/delete operations. + */ + public static final Setting INDEX_SOFT_DELETES_SETTING = + Setting.boolSetting("index.soft_deletes.enabled", false, Property.IndexScope, Property.Final); + + /** + * Controls how many soft-deleted documents will be kept around before being merged away. Keeping more deleted + * documents increases the chance of operation-based recoveries and allows querying a longer history of documents. + * If soft-deletes is enabled, an engine by default will retain all operations up to the global checkpoint. + **/ + public static final Setting INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING = + Setting.longSetting("index.soft_deletes.retention.operations", 0, 0, Property.IndexScope, Property.Dynamic); + /** * The maximum number of refresh listeners allows on this shard. 
*/ @@ -309,6 +324,8 @@ public final class IndexSettings { private final IndexSortConfig indexSortConfig; private final IndexScopedSettings scopedSettings; private long gcDeletesInMillis = DEFAULT_GC_DELETES.millis(); + private final boolean softDeleteEnabled; + private volatile long softDeleteRetentionOperations; private volatile boolean warmerEnabled; private volatile int maxResultWindow; private volatile int maxInnerResultWindow; @@ -423,6 +440,8 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti generationThresholdSize = scopedSettings.get(INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING); mergeSchedulerConfig = new MergeSchedulerConfig(this); gcDeletesInMillis = scopedSettings.get(INDEX_GC_DELETES_SETTING).getMillis(); + softDeleteEnabled = version.onOrAfter(Version.V_6_5_0) && scopedSettings.get(INDEX_SOFT_DELETES_SETTING); + softDeleteRetentionOperations = scopedSettings.get(INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING); warmerEnabled = scopedSettings.get(INDEX_WARMER_ENABLED_SETTING); maxResultWindow = scopedSettings.get(MAX_RESULT_WINDOW_SETTING); maxInnerResultWindow = scopedSettings.get(MAX_INNER_RESULT_WINDOW_SETTING); @@ -482,6 +501,7 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti scopedSettings.addSettingsUpdateConsumer(DEFAULT_FIELD_SETTING, this::setDefaultFields); scopedSettings.addSettingsUpdateConsumer(MAX_REGEX_LENGTH_SETTING, this::setMaxRegexLength); scopedSettings.addSettingsUpdateConsumer(DEFAULT_PIPELINE, this::setDefaultPipeline); + scopedSettings.addSettingsUpdateConsumer(INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING, this::setSoftDeleteRetentionOperations); } private void setTranslogFlushThresholdSize(ByteSizeValue byteSizeValue) { @@ -850,4 +870,22 @@ public String getDefaultPipeline() { public void setDefaultPipeline(String defaultPipeline) { this.defaultPipeline = defaultPipeline; } + + /** + * Returns true if soft-delete is enabled. + */ + public boolean isSoftDeleteEnabled() { + return softDeleteEnabled; + } + + private void setSoftDeleteRetentionOperations(long ops) { + this.softDeleteRetentionOperations = ops; + } + + /** + * Returns the number of extra operations (i.e. soft-deleted documents) to be kept for recoveries and history purpose. + */ + public long getSoftDeleteRetentionOperations() { + return this.softDeleteRetentionOperations; + } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java index 774224cd29c18..ddb374fa50cf1 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java @@ -46,14 +46,17 @@ public final class CombinedDeletionPolicy extends IndexDeletionPolicy { private final Logger logger; private final TranslogDeletionPolicy translogDeletionPolicy; + private final SoftDeletesPolicy softDeletesPolicy; private final LongSupplier globalCheckpointSupplier; private final ObjectIntHashMap snapshottedCommits; // Number of snapshots held against each commit point. private volatile IndexCommit safeCommit; // the most recent safe commit point - its max_seqno at most the persisted global checkpoint. 
private volatile IndexCommit lastCommit; // the most recent commit point - CombinedDeletionPolicy(Logger logger, TranslogDeletionPolicy translogDeletionPolicy, LongSupplier globalCheckpointSupplier) { + CombinedDeletionPolicy(Logger logger, TranslogDeletionPolicy translogDeletionPolicy, + SoftDeletesPolicy softDeletesPolicy, LongSupplier globalCheckpointSupplier) { this.logger = logger; this.translogDeletionPolicy = translogDeletionPolicy; + this.softDeletesPolicy = softDeletesPolicy; this.globalCheckpointSupplier = globalCheckpointSupplier; this.snapshottedCommits = new ObjectIntHashMap<>(); } @@ -80,7 +83,7 @@ public synchronized void onCommit(List commits) throws IO deleteCommit(commits.get(i)); } } - updateTranslogDeletionPolicy(); + updateRetentionPolicy(); } private void deleteCommit(IndexCommit commit) throws IOException { @@ -90,7 +93,7 @@ private void deleteCommit(IndexCommit commit) throws IOException { assert commit.isDeleted() : "Deletion commit [" + commitDescription(commit) + "] was suppressed"; } - private void updateTranslogDeletionPolicy() throws IOException { + private void updateRetentionPolicy() throws IOException { assert Thread.holdsLock(this); logger.debug("Safe commit [{}], last commit [{}]", commitDescription(safeCommit), commitDescription(lastCommit)); assert safeCommit.isDeleted() == false : "The safe commit must not be deleted"; @@ -101,6 +104,9 @@ private void updateTranslogDeletionPolicy() throws IOException { assert minRequiredGen <= lastGen : "minRequiredGen must not be greater than lastGen"; translogDeletionPolicy.setTranslogGenerationOfLastCommit(lastGen); translogDeletionPolicy.setMinTranslogGenerationForRecovery(minRequiredGen); + + softDeletesPolicy.setLocalCheckpointOfSafeCommit( + Long.parseLong(safeCommit.getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY))); } /** diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index fe29542d1e2ef..5c7268a134225 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -58,6 +58,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.index.mapper.ParsedDocument; @@ -97,6 +98,7 @@ public abstract class Engine implements Closeable { public static final String SYNC_COMMIT_ID = "sync_id"; public static final String HISTORY_UUID_KEY = "history_uuid"; + public static final String MIN_RETAINED_SEQNO = "min_retained_seq_no"; protected final ShardId shardId; protected final String allocationId; @@ -585,18 +587,32 @@ public enum SearcherScope { public abstract void syncTranslog() throws IOException; - public abstract Closeable acquireTranslogRetentionLock(); + /** + * Acquires a lock on the translog files and Lucene soft-deleted documents to prevent them from being trimmed + */ + public abstract Closeable acquireRetentionLockForPeerRecovery(); + + /** + * Creates a new history snapshot from Lucene for reading operations whose seqno in the requesting seqno range (both inclusive) + */ + public abstract Translog.Snapshot newChangesSnapshot(String source, MapperService mapperService, + long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws 
IOException; + + /** + * Creates a new history snapshot for reading operations since {@code startingSeqNo} (inclusive). + * The returned snapshot can be retrieved from either Lucene index or translog files. + */ + public abstract Translog.Snapshot readHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException; /** - * Creates a new translog snapshot from this engine for reading translog operations whose seq# at least the provided seq#. - * The caller has to close the returned snapshot after finishing the reading. + * Returns the estimated number of history operations whose seq# is at least {@code startingSeqNo} (inclusive) in this engine. */ - public abstract Translog.Snapshot newTranslogSnapshotFromMinSeqNo(long minSeqNo) throws IOException; + public abstract int estimateNumberOfHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException; /** - * Returns the estimated number of translog operations in this engine whose seq# at least the provided seq#. + * Checks if this engine has every operation since {@code startingSeqNo} (inclusive) in its history (either Lucene or translog). */ - public abstract int estimateTranslogOperationsFromMinSeq(long minSeqNo); + public abstract boolean hasCompleteOperationHistory(String source, MapperService mapperService, long startingSeqNo) throws IOException; public abstract TranslogStats getTranslogStats(); diff --git a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index 2deae61bd52e9..23a90553f60a8 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.codec.CodecService; +import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; @@ -80,6 +81,7 @@ public final class EngineConfig { private final CircuitBreakerService circuitBreakerService; private final LongSupplier globalCheckpointSupplier; private final LongSupplier primaryTermSupplier; + private final TombstoneDocSupplier tombstoneDocSupplier; /** * Index setting to change the low level lucene codec used for writing new segments.
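The hunks above replace the translog-only snapshot API with a history API that is served from Lucene when soft-deletes are enabled and from the translog otherwise. As a minimal sketch (not part of this patch), a peer-recovery style caller could drive the new API as follows; the engine, mapperService and startingSeqNo variables, the "peer-recovery" source label, and the replay step are assumptions for illustration, inside a method that is allowed to throw IOException:

    // Hold the retention lock so that neither translog generations nor soft-deleted
    // documents are trimmed while history is being read.
    try (Closeable retentionLock = engine.acquireRetentionLockForPeerRecovery()) {
        if (engine.hasCompleteOperationHistory("peer-recovery", mapperService, startingSeqNo)) {
            // Backed by a Lucene changes snapshot when soft-deletes are enabled, otherwise by the translog.
            try (Translog.Snapshot snapshot = engine.readHistoryOperations("peer-recovery", mapperService, startingSeqNo)) {
                Translog.Operation op;
                while ((op = snapshot.next()) != null) {
                    // replay op on the recovering replica (assumed consumer)
                }
            }
        }
    }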
@@ -126,7 +128,8 @@ public EngineConfig(ShardId shardId, String allocationId, ThreadPool threadPool, List externalRefreshListener, List internalRefreshListener, Sort indexSort, TranslogRecoveryRunner translogRecoveryRunner, CircuitBreakerService circuitBreakerService, - LongSupplier globalCheckpointSupplier, LongSupplier primaryTermSupplier) { + LongSupplier globalCheckpointSupplier, LongSupplier primaryTermSupplier, + TombstoneDocSupplier tombstoneDocSupplier) { this.shardId = shardId; this.allocationId = allocationId; this.indexSettings = indexSettings; @@ -164,6 +167,7 @@ public EngineConfig(ShardId shardId, String allocationId, ThreadPool threadPool, this.circuitBreakerService = circuitBreakerService; this.globalCheckpointSupplier = globalCheckpointSupplier; this.primaryTermSupplier = primaryTermSupplier; + this.tombstoneDocSupplier = tombstoneDocSupplier; } /** @@ -373,4 +377,25 @@ public CircuitBreakerService getCircuitBreakerService() { public LongSupplier getPrimaryTermSupplier() { return primaryTermSupplier; } + + /** + * A supplier that supplies tombstone documents to be used in soft-update methods. + * The returned document consists of only the _uid, _seqno, _term and _version fields; other metadata fields are excluded. + */ + public interface TombstoneDocSupplier { + /** + * Creates a tombstone document for a delete operation. + */ + ParsedDocument newDeleteTombstoneDoc(String type, String id); + + /** + * Creates a tombstone document for a noop operation. + * @param reason the reason for a noop + */ + ParsedDocument newNoopTombstoneDoc(String reason); + } + + public TombstoneDocSupplier getTombstoneDocSupplier() { + return tombstoneDocSupplier; + } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 4f83a1cfb6f30..75443eb21c30d 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -21,16 +21,20 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LiveIndexWriterConfig; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.index.SoftDeletesRetentionMergePolicy; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.ReferenceManager; @@ -42,6 +46,7 @@ import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.InfoStream; +import org.elasticsearch.Assertions; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequest; @@ -62,7 +67,11 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.MapperService; import
org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.SeqNoFieldMapper; +import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.UidFieldMapper; import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.OnGoingMerge; @@ -144,6 +153,10 @@ public class InternalEngine extends Engine { private final CounterMetric numDocDeletes = new CounterMetric(); private final CounterMetric numDocAppends = new CounterMetric(); private final CounterMetric numDocUpdates = new CounterMetric(); + private final NumericDocValuesField softDeletesField = Lucene.newSoftDeletesField(); + private final boolean softDeleteEnabled; + private final SoftDeletesPolicy softDeletesPolicy; + private final LastRefreshedCheckpointListener lastRefreshedCheckpointListener; /** * How many bytes we are currently moving to disk, via either IndexWriter.flush or refresh. IndexingMemoryController polls this @@ -189,8 +202,10 @@ public InternalEngine(EngineConfig engineConfig) { assert translog.getGeneration() != null; this.translog = translog; this.localCheckpointTracker = createLocalCheckpointTracker(localCheckpointTrackerSupplier); + this.softDeleteEnabled = engineConfig.getIndexSettings().isSoftDeleteEnabled(); + this.softDeletesPolicy = newSoftDeletesPolicy(); this.combinedDeletionPolicy = - new CombinedDeletionPolicy(logger, translogDeletionPolicy, translog::getLastSyncedGlobalCheckpoint); + new CombinedDeletionPolicy(logger, translogDeletionPolicy, softDeletesPolicy, translog::getLastSyncedGlobalCheckpoint); writer = createWriter(); bootstrapAppendOnlyInfoFromWriter(writer); historyUUID = loadHistoryUUID(writer); @@ -220,6 +235,8 @@ public InternalEngine(EngineConfig engineConfig) { for (ReferenceManager.RefreshListener listener: engineConfig.getInternalRefreshListener()) { this.internalSearcherManager.addListener(listener); } + this.lastRefreshedCheckpointListener = new LastRefreshedCheckpointListener(localCheckpointTracker.getCheckpoint()); + this.internalSearcherManager.addListener(lastRefreshedCheckpointListener); success = true; } finally { if (success == false) { @@ -245,6 +262,18 @@ private LocalCheckpointTracker createLocalCheckpointTracker( return localCheckpointTrackerSupplier.apply(maxSeqNo, localCheckpoint); } + private SoftDeletesPolicy newSoftDeletesPolicy() throws IOException { + final Map commitUserData = store.readLastCommittedSegmentsInfo().userData; + final long lastMinRetainedSeqNo; + if (commitUserData.containsKey(Engine.MIN_RETAINED_SEQNO)) { + lastMinRetainedSeqNo = Long.parseLong(commitUserData.get(Engine.MIN_RETAINED_SEQNO)); + } else { + lastMinRetainedSeqNo = Long.parseLong(commitUserData.get(SequenceNumbers.MAX_SEQ_NO)) + 1; + } + return new SoftDeletesPolicy(translog::getLastSyncedGlobalCheckpoint, lastMinRetainedSeqNo, + engineConfig.getIndexSettings().getSoftDeleteRetentionOperations()); + } + /** * This reference manager delegates all it's refresh calls to another (internal) SearcherManager * The main purpose for this is that if we have external refreshes happening we don't issue extra @@ -464,19 +493,31 @@ public void syncTranslog() throws IOException { revisitIndexDeletionPolicyOnTranslogSynced(); } + /** + * Creates a new history snapshot for reading operations since the provided seqno. + * The returned snapshot can be retrieved from either Lucene index or translog files. 
+ */ @Override - public Closeable acquireTranslogRetentionLock() { - return getTranslog().acquireRetentionLock(); - } - - @Override - public Translog.Snapshot newTranslogSnapshotFromMinSeqNo(long minSeqNo) throws IOException { - return getTranslog().newSnapshotFromMinSeqNo(minSeqNo); + public Translog.Snapshot readHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException { + if (engineConfig.getIndexSettings().isSoftDeleteEnabled()) { + return newChangesSnapshot(source, mapperService, Math.max(0, startingSeqNo), Long.MAX_VALUE, false); + } else { + return getTranslog().newSnapshotFromMinSeqNo(startingSeqNo); + } } + /** + * Returns the estimated number of history operations whose seq# at least the provided seq# in this engine. + */ @Override - public int estimateTranslogOperationsFromMinSeq(long minSeqNo) { - return getTranslog().estimateTotalOperationsFromMinSeq(minSeqNo); + public int estimateNumberOfHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException { + if (engineConfig.getIndexSettings().isSoftDeleteEnabled()) { + try (Translog.Snapshot snapshot = newChangesSnapshot(source, mapperService, Math.max(0, startingSeqNo), Long.MAX_VALUE, false)) { + return snapshot.totalOperations(); + } + } else { + return getTranslog().estimateTotalOperationsFromMinSeq(startingSeqNo); + } } @Override @@ -844,7 +885,7 @@ public IndexResult index(Index index) throws IOException { if (plan.earlyResultOnPreFlightError.isPresent()) { indexResult = plan.earlyResultOnPreFlightError.get(); assert indexResult.getResultType() == Result.Type.FAILURE : indexResult.getResultType(); - } else if (plan.indexIntoLucene) { + } else if (plan.indexIntoLucene || plan.addStaleOpToLucene) { indexResult = indexIntoLucene(index, plan); } else { indexResult = new IndexResult( @@ -855,8 +896,10 @@ public IndexResult index(Index index) throws IOException { if (indexResult.getResultType() == Result.Type.SUCCESS) { location = translog.add(new Translog.Index(index, indexResult)); } else if (indexResult.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { - // if we have document failure, record it as a no-op in the translog with the generated seq_no - location = translog.add(new Translog.NoOp(indexResult.getSeqNo(), index.primaryTerm(), indexResult.getFailure().toString())); + // if we have document failure, record it as a no-op in the translog and Lucene with the generated seq_no + final NoOp noOp = new NoOp(indexResult.getSeqNo(), index.primaryTerm(), index.origin(), + index.startTime(), indexResult.getFailure().toString()); + location = innerNoOp(noOp).getTranslogLocation(); } else { location = null; } @@ -911,14 +954,7 @@ private IndexingStrategy planIndexingAsNonPrimary(Index index) throws IOExceptio // unlike the primary, replicas don't really care to about creation status of documents // this allows to ignore the case where a document was found in the live version maps in // a delete state and return false for the created flag in favor of code simplicity - final OpVsLuceneDocStatus opVsLucene; - if (index.seqNo() == SequenceNumbers.UNASSIGNED_SEQ_NO) { - // This can happen if the primary is still on an old node and send traffic without seq# or we recover from translog - // created by an old version. - assert config().getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1) : - "index is newly created but op has no sequence numbers. 
op: " + index; - opVsLucene = compareOpToLuceneDocBasedOnVersions(index); - } else if (index.seqNo() <= localCheckpointTracker.getCheckpoint()){ + if (index.seqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && index.seqNo() <= localCheckpointTracker.getCheckpoint()){ // the operation seq# is lower then the current local checkpoint and thus was already put into lucene // this can happen during recovery where older operations are sent from the translog that are already // part of the lucene commit (either from a peer recovery or a local translog) @@ -926,16 +962,24 @@ assert config().getIndexSettings().getIndexVersionCreated().before(Version.V_6_0 // question may have been deleted in an out of order op that is not replayed. // See testRecoverFromStoreWithOutOfOrderDelete for an example of local recovery // See testRecoveryWithOutOfOrderDelete for an example of peer recovery - opVsLucene = OpVsLuceneDocStatus.OP_STALE_OR_EQUAL; - } else { - opVsLucene = compareOpToLuceneDocBasedOnSeqNo(index); - } - if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) { plan = IndexingStrategy.processButSkipLucene(false, index.seqNo(), index.version()); } else { - plan = IndexingStrategy.processNormally( - opVsLucene == OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND, index.seqNo(), index.version() - ); + final OpVsLuceneDocStatus opVsLucene; + if (index.seqNo() == SequenceNumbers.UNASSIGNED_SEQ_NO) { + // This can happen if the primary is still on an old node and send traffic without seq# + // or we recover from translog created by an old version. + assert config().getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1) : + "index is newly created but op has no sequence numbers. op: " + index; + opVsLucene = compareOpToLuceneDocBasedOnVersions(index); + } else { + opVsLucene = compareOpToLuceneDocBasedOnSeqNo(index); + } + if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) { + plan = IndexingStrategy.processAsStaleOp(softDeleteEnabled, index.seqNo(), index.version()); + } else { + plan = IndexingStrategy.processNormally(opVsLucene == OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND, + index.seqNo(), index.version()); + } } } return plan; @@ -984,7 +1028,7 @@ private IndexResult indexIntoLucene(Index index, IndexingStrategy plan) throws IOException { assert assertSequenceNumberBeforeIndexing(index.origin(), plan.seqNoForIndexing); assert plan.versionForIndexing >= 0 : "version must be set. got " + plan.versionForIndexing; - assert plan.indexIntoLucene; + assert plan.indexIntoLucene || plan.addStaleOpToLucene; /* Update the document's sequence number and primary term; the sequence number here is derived here from either the sequence * number service if this is on the primary, or the existing document's sequence number if this is on the replica. The * primary term here has already been set, see IndexShard#prepareIndex where the Engine$Index operation is created. 
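To make the stale-op handling and soft-update paths above concrete, here is a standalone sketch (not engine code) of the Lucene-level mechanism they rely on: an IndexWriter configured with a soft-deletes field and a SoftDeletesRetentionMergePolicy, where an update marks the previous copy of the document with the __soft_deletes docvalues field instead of hard-deleting it. The directory variable, the analyzer, the "_id" term, the match-all retention query and the standard Lucene imports are assumptions for illustration; the engine derives its retention query from the soft-deletes policy instead.

    // Sketch only: wire up a soft-deletes aware IndexWriter.
    IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer())
        .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD)
        .setMergePolicy(new SoftDeletesRetentionMergePolicy(
            Lucene.SOFT_DELETES_FIELD,
            MatchAllDocsQuery::new, // retention query: which soft-deleted docs must be kept (here: all of them)
            new TieredMergePolicy()));
    try (IndexWriter writer = new IndexWriter(directory, iwc)) {
        Document doc = new Document();
        doc.add(new StringField("_id", "1", Field.Store.NO));
        writer.addDocument(doc);
        // The update keeps the previous copy around as a soft-deleted document (marked with the
        // __soft_deletes numeric docvalues field) rather than hard-deleting it from the index.
        writer.softUpdateDocument(new Term("_id", "1"), doc, Lucene.newSoftDeletesField());
    }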
@@ -992,7 +1036,9 @@ private IndexResult indexIntoLucene(Index index, IndexingStrategy plan) index.parsedDoc().updateSeqID(plan.seqNoForIndexing, index.primaryTerm()); index.parsedDoc().version().setLongValue(plan.versionForIndexing); try { - if (plan.useLuceneUpdateDocument) { + if (plan.addStaleOpToLucene) { + addStaleDocs(index.docs(), indexWriter); + } else if (plan.useLuceneUpdateDocument) { updateDocs(index.uid(), index.docs(), indexWriter); } else { // document does not exists, we can optimize for create, but double check if assertions are running @@ -1056,16 +1102,29 @@ private void addDocs(final List docs, final IndexWriter i numDocAppends.inc(docs.size()); } - private static final class IndexingStrategy { + private void addStaleDocs(final List docs, final IndexWriter indexWriter) throws IOException { + assert softDeleteEnabled : "Add history documents but soft-deletes is disabled"; + for (ParseContext.Document doc : docs) { + doc.add(softDeletesField); // soft-deleted every document before adding to Lucene + } + if (docs.size() > 1) { + indexWriter.addDocuments(docs); + } else { + indexWriter.addDocument(docs.get(0)); + } + } + + protected static final class IndexingStrategy { final boolean currentNotFoundOrDeleted; final boolean useLuceneUpdateDocument; final long seqNoForIndexing; final long versionForIndexing; final boolean indexIntoLucene; + final boolean addStaleOpToLucene; final Optional earlyResultOnPreFlightError; private IndexingStrategy(boolean currentNotFoundOrDeleted, boolean useLuceneUpdateDocument, - boolean indexIntoLucene, long seqNoForIndexing, + boolean indexIntoLucene, boolean addStaleOpToLucene, long seqNoForIndexing, long versionForIndexing, IndexResult earlyResultOnPreFlightError) { assert useLuceneUpdateDocument == false || indexIntoLucene : "use lucene update is set to true, but we're not indexing into lucene"; @@ -1078,37 +1137,40 @@ private IndexingStrategy(boolean currentNotFoundOrDeleted, boolean useLuceneUpda this.seqNoForIndexing = seqNoForIndexing; this.versionForIndexing = versionForIndexing; this.indexIntoLucene = indexIntoLucene; + this.addStaleOpToLucene = addStaleOpToLucene; this.earlyResultOnPreFlightError = earlyResultOnPreFlightError == null ? 
Optional.empty() : Optional.of(earlyResultOnPreFlightError); } static IndexingStrategy optimizedAppendOnly(long seqNoForIndexing) { - return new IndexingStrategy(true, false, true, seqNoForIndexing, 1, null); + return new IndexingStrategy(true, false, true, false, seqNoForIndexing, 1, null); } static IndexingStrategy skipDueToVersionConflict( VersionConflictEngineException e, boolean currentNotFoundOrDeleted, long currentVersion, long term) { final IndexResult result = new IndexResult(e, currentVersion, term); return new IndexingStrategy( - currentNotFoundOrDeleted, false, false, SequenceNumbers.UNASSIGNED_SEQ_NO, Versions.NOT_FOUND, result); + currentNotFoundOrDeleted, false, false, false, SequenceNumbers.UNASSIGNED_SEQ_NO, Versions.NOT_FOUND, result); } static IndexingStrategy processNormally(boolean currentNotFoundOrDeleted, long seqNoForIndexing, long versionForIndexing) { return new IndexingStrategy(currentNotFoundOrDeleted, currentNotFoundOrDeleted == false, - true, seqNoForIndexing, versionForIndexing, null); + true, false, seqNoForIndexing, versionForIndexing, null); } static IndexingStrategy overrideExistingAsIfNotThere( long seqNoForIndexing, long versionForIndexing) { - return new IndexingStrategy(true, true, true, seqNoForIndexing, versionForIndexing, null); + return new IndexingStrategy(true, true, true, false, seqNoForIndexing, versionForIndexing, null); } - static IndexingStrategy processButSkipLucene(boolean currentNotFoundOrDeleted, - long seqNoForIndexing, long versionForIndexing) { - return new IndexingStrategy(currentNotFoundOrDeleted, false, - false, seqNoForIndexing, versionForIndexing, null); + static IndexingStrategy processButSkipLucene(boolean currentNotFoundOrDeleted, long seqNoForIndexing, long versionForIndexing) { + return new IndexingStrategy(currentNotFoundOrDeleted, false, false, false, seqNoForIndexing, versionForIndexing, null); + } + + static IndexingStrategy processAsStaleOp(boolean addStaleOpToLucene, long seqNoForIndexing, long versionForIndexing) { + return new IndexingStrategy(false, false, false, addStaleOpToLucene, seqNoForIndexing, versionForIndexing, null); } } @@ -1135,10 +1197,18 @@ private boolean assertDocDoesNotExist(final Index index, final boolean allowDele } private void updateDocs(final Term uid, final List docs, final IndexWriter indexWriter) throws IOException { - if (docs.size() > 1) { - indexWriter.updateDocuments(uid, docs); + if (softDeleteEnabled) { + if (docs.size() > 1) { + indexWriter.softUpdateDocuments(uid, docs, softDeletesField); + } else { + indexWriter.softUpdateDocument(uid, docs.get(0), softDeletesField); + } } else { - indexWriter.updateDocument(uid, docs.get(0)); + if (docs.size() > 1) { + indexWriter.updateDocuments(uid, docs); + } else { + indexWriter.updateDocument(uid, docs.get(0)); + } } numDocUpdates.inc(docs.size()); } @@ -1163,7 +1233,7 @@ public DeleteResult delete(Delete delete) throws IOException { if (plan.earlyResultOnPreflightError.isPresent()) { deleteResult = plan.earlyResultOnPreflightError.get(); - } else if (plan.deleteFromLucene) { + } else if (plan.deleteFromLucene || plan.addStaleOpToLucene) { deleteResult = deleteInLucene(delete, plan); } else { deleteResult = new DeleteResult( @@ -1174,8 +1244,10 @@ public DeleteResult delete(Delete delete) throws IOException { if (deleteResult.getResultType() == Result.Type.SUCCESS) { location = translog.add(new Translog.Delete(delete, deleteResult)); } else if (deleteResult.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { - location = translog.add(new 
Translog.NoOp(deleteResult.getSeqNo(), - delete.primaryTerm(), deleteResult.getFailure().toString())); + // if we have document failure, record it as a no-op in the translog and Lucene with the generated seq_no + final NoOp noOp = new NoOp(deleteResult.getSeqNo(), delete.primaryTerm(), delete.origin(), + delete.startTime(), deleteResult.getFailure().toString()); + location = innerNoOp(noOp).getTranslogLocation(); } else { location = null; } @@ -1210,12 +1282,8 @@ private DeletionStrategy planDeletionAsNonPrimary(Delete delete) throws IOExcept // unlike the primary, replicas don't really care to about found status of documents // this allows to ignore the case where a document was found in the live version maps in // a delete state and return true for the found flag in favor of code simplicity - final OpVsLuceneDocStatus opVsLucene; - if (delete.seqNo() == SequenceNumbers.UNASSIGNED_SEQ_NO) { - assert config().getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1) : - "index is newly created but op has no sequence numbers. op: " + delete; - opVsLucene = compareOpToLuceneDocBasedOnVersions(delete); - } else if (delete.seqNo() <= localCheckpointTracker.getCheckpoint()) { + final DeletionStrategy plan; + if (delete.seqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && delete.seqNo() <= localCheckpointTracker.getCheckpoint()) { // the operation seq# is lower then the current local checkpoint and thus was already put into lucene // this can happen during recovery where older operations are sent from the translog that are already // part of the lucene commit (either from a peer recovery or a local translog) @@ -1223,18 +1291,24 @@ assert config().getIndexSettings().getIndexVersionCreated().before(Version.V_6_0 // question may have been deleted in an out of order op that is not replayed. // See testRecoverFromStoreWithOutOfOrderDelete for an example of local recovery // See testRecoveryWithOutOfOrderDelete for an example of peer recovery - opVsLucene = OpVsLuceneDocStatus.OP_STALE_OR_EQUAL; - } else { - opVsLucene = compareOpToLuceneDocBasedOnSeqNo(delete); - } - - final DeletionStrategy plan; - if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) { plan = DeletionStrategy.processButSkipLucene(false, delete.seqNo(), delete.version()); } else { - plan = DeletionStrategy.processNormally( - opVsLucene == OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND, - delete.seqNo(), delete.version()); + final OpVsLuceneDocStatus opVsLucene; + if (delete.seqNo() == SequenceNumbers.UNASSIGNED_SEQ_NO) { + // This can happen if the primary is still on an old node and send traffic without seq# + // or we recover from translog created by an old version. + assert config().getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1) : + "index is newly created but op has no sequence numbers. 
op: " + delete; + opVsLucene = compareOpToLuceneDocBasedOnVersions(delete); + } else { + opVsLucene = compareOpToLuceneDocBasedOnSeqNo(delete); + } + if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) { + plan = DeletionStrategy.processAsStaleOp(softDeleteEnabled, false, delete.seqNo(), delete.version()); + } else { + plan = DeletionStrategy.processNormally(opVsLucene == OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND, + delete.seqNo(), delete.version()); + } } return plan; } @@ -1269,15 +1343,31 @@ private DeletionStrategy planDeletionAsPrimary(Delete delete) throws IOException private DeleteResult deleteInLucene(Delete delete, DeletionStrategy plan) throws IOException { try { - if (plan.currentlyDeleted == false) { + if (softDeleteEnabled) { + final ParsedDocument tombstone = engineConfig.getTombstoneDocSupplier().newDeleteTombstoneDoc(delete.type(), delete.id()); + assert tombstone.docs().size() == 1 : "Tombstone doc should have single doc [" + tombstone + "]"; + tombstone.updateSeqID(plan.seqNoOfDeletion, delete.primaryTerm()); + tombstone.version().setLongValue(plan.versionOfDeletion); + final ParseContext.Document doc = tombstone.docs().get(0); + assert doc.getField(SeqNoFieldMapper.TOMBSTONE_NAME) != null : + "Delete tombstone document but _tombstone field is not set [" + doc + " ]"; + doc.add(softDeletesField); + if (plan.addStaleOpToLucene || plan.currentlyDeleted) { + indexWriter.addDocument(doc); + } else { + indexWriter.softUpdateDocument(delete.uid(), doc, softDeletesField); + } + } else if (plan.currentlyDeleted == false) { // any exception that comes from this is a either an ACE or a fatal exception there // can't be any document failures coming from this indexWriter.deleteDocuments(delete.uid()); + } + if (plan.deleteFromLucene) { numDocDeletes.inc(); + versionMap.putDeleteUnderLock(delete.uid().bytes(), + new DeleteVersionValue(plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(), + engineConfig.getThreadPool().relativeTimeInMillis())); } - versionMap.putDeleteUnderLock(delete.uid().bytes(), - new DeleteVersionValue(plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(), - engineConfig.getThreadPool().relativeTimeInMillis())); return new DeleteResult( plan.versionOfDeletion, getPrimaryTerm(), plan.seqNoOfDeletion, plan.currentlyDeleted == false); } catch (Exception ex) { @@ -1294,12 +1384,13 @@ private DeleteResult deleteInLucene(Delete delete, DeletionStrategy plan) private static final class DeletionStrategy { // of a rare double delete final boolean deleteFromLucene; + final boolean addStaleOpToLucene; final boolean currentlyDeleted; final long seqNoOfDeletion; final long versionOfDeletion; final Optional earlyResultOnPreflightError; - private DeletionStrategy(boolean deleteFromLucene, boolean currentlyDeleted, + private DeletionStrategy(boolean deleteFromLucene, boolean addStaleOpToLucene, boolean currentlyDeleted, long seqNoOfDeletion, long versionOfDeletion, DeleteResult earlyResultOnPreflightError) { assert (deleteFromLucene && earlyResultOnPreflightError != null) == false : @@ -1307,6 +1398,7 @@ private DeletionStrategy(boolean deleteFromLucene, boolean currentlyDeleted, "deleteFromLucene: " + deleteFromLucene + " earlyResultOnPreFlightError:" + earlyResultOnPreflightError; this.deleteFromLucene = deleteFromLucene; + this.addStaleOpToLucene = addStaleOpToLucene; this.currentlyDeleted = currentlyDeleted; this.seqNoOfDeletion = seqNoOfDeletion; this.versionOfDeletion = versionOfDeletion; @@ -1318,16 +1410,22 @@ static DeletionStrategy 
skipDueToVersionConflict( VersionConflictEngineException e, long currentVersion, long term, boolean currentlyDeleted) { final long unassignedSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; final DeleteResult deleteResult = new DeleteResult(e, currentVersion, term, unassignedSeqNo, currentlyDeleted == false); - return new DeletionStrategy(false, currentlyDeleted, unassignedSeqNo, Versions.NOT_FOUND, deleteResult); + return new DeletionStrategy(false, false, currentlyDeleted, unassignedSeqNo, Versions.NOT_FOUND, deleteResult); } static DeletionStrategy processNormally(boolean currentlyDeleted, long seqNoOfDeletion, long versionOfDeletion) { - return new DeletionStrategy(true, currentlyDeleted, seqNoOfDeletion, versionOfDeletion, null); + return new DeletionStrategy(true, false, currentlyDeleted, seqNoOfDeletion, versionOfDeletion, null); + + } + public static DeletionStrategy processButSkipLucene(boolean currentlyDeleted, + long seqNoOfDeletion, long versionOfDeletion) { + return new DeletionStrategy(false, false, currentlyDeleted, seqNoOfDeletion, versionOfDeletion, null); } - public static DeletionStrategy processButSkipLucene(boolean currentlyDeleted, long seqNoOfDeletion, long versionOfDeletion) { - return new DeletionStrategy(false, currentlyDeleted, seqNoOfDeletion, versionOfDeletion, null); + static DeletionStrategy processAsStaleOp(boolean addStaleOpToLucene, boolean currentlyDeleted, + long seqNoOfDeletion, long versionOfDeletion) { + return new DeletionStrategy(false, addStaleOpToLucene, currentlyDeleted, seqNoOfDeletion, versionOfDeletion, null); } } @@ -1356,7 +1454,28 @@ private NoOpResult innerNoOp(final NoOp noOp) throws IOException { assert noOp.seqNo() > SequenceNumbers.NO_OPS_PERFORMED; final long seqNo = noOp.seqNo(); try { - final NoOpResult noOpResult = new NoOpResult(getPrimaryTerm(), noOp.seqNo()); + Exception failure = null; + if (softDeleteEnabled) { + try { + final ParsedDocument tombstone = engineConfig.getTombstoneDocSupplier().newNoopTombstoneDoc(noOp.reason()); + tombstone.updateSeqID(noOp.seqNo(), noOp.primaryTerm()); + // A noop tombstone does not require a _version but it's added to have a fully dense docvalues for the version field. + // 1L is selected to optimize the compression because it might probably be the most common value in version field. + tombstone.version().setLongValue(1L); + assert tombstone.docs().size() == 1 : "Tombstone should have a single doc [" + tombstone + "]"; + final ParseContext.Document doc = tombstone.docs().get(0); + assert doc.getField(SeqNoFieldMapper.TOMBSTONE_NAME) != null + : "Noop tombstone document but _tombstone field is not set [" + doc + " ]"; + doc.add(softDeletesField); + indexWriter.addDocument(doc); + } catch (Exception ex) { + if (maybeFailEngine("noop", ex)) { + throw ex; + } + failure = ex; + } + } + final NoOpResult noOpResult = failure != null ? new NoOpResult(getPrimaryTerm(), noOp.seqNo(), failure) : new NoOpResult(getPrimaryTerm(), noOp.seqNo()); if (noOp.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { final Translog.Location location = translog.add(new Translog.NoOp(noOp.seqNo(), noOp.primaryTerm(), noOp.reason())); noOpResult.setTranslogLocation(location); @@ -1381,6 +1500,7 @@ final void refresh(String source, SearcherScope scope) throws EngineException { // since it flushes the index as well (though, in terms of concurrency, we are allowed to do it) // both refresh types will result in an internal refresh but only the external will also // pass the new reader reference to the external reader manager. 
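The tombstone writes and soft updates above build on Lucene's soft-deletes support. The following is a minimal, self-contained sketch of that primitive alone, with an illustrative field name and a keep-everything retention query; the engine instead wires in Lucene.SOFT_DELETES_FIELD and SoftDeletesPolicy#getRetentionQuery. An "update" marks the superseded copy as deleted through an extra doc-values field rather than removing it, and the retention merge policy decides how long such copies survive merges.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.SoftDeletesRetentionMergePolicy;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class SoftDeletesSketch {
    public static void main(String[] args) throws Exception {
        final String softDeletesField = "__soft_deletes"; // illustrative name
        IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer())
            .setSoftDeletesField(softDeletesField)
            // keep every soft-deleted doc in this sketch; the engine supplies SoftDeletesPolicy#getRetentionQuery instead
            .setMergePolicy(new SoftDeletesRetentionMergePolicy(softDeletesField, MatchAllDocsQuery::new, new TieredMergePolicy()));
        try (Directory dir = new RAMDirectory(); IndexWriter writer = new IndexWriter(dir, iwc)) {
            Document v1 = new Document();
            v1.add(new StringField("_id", "1", Field.Store.YES));
            writer.addDocument(v1);
            // an update soft-deletes the old copy (via the extra doc-values field) instead of hard-deleting it
            Document v2 = new Document();
            v2.add(new StringField("_id", "1", Field.Store.YES));
            writer.softUpdateDocument(new Term("_id", "1"), v2, new NumericDocValuesField(softDeletesField, 1));
            try (DirectoryReader reader = DirectoryReader.open(writer)) {
                // the NRT reader hides the soft-deleted copy, but it is still physically present for history readers
                System.out.println(reader.numDocs() + " live doc(s) out of " + reader.maxDoc());
            }
        }
    }
}

Near-real-time readers opened from the writer hide the soft-deleted copies; the changes snapshot introduced below goes the other way and uses Lucene.wrapAllDocsLive to make those copies visible again when reading history.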
+ final long localCheckpointBeforeRefresh = getLocalCheckpoint(); // this will also cause version map ram to be freed hence we always account for it. final long bytes = indexWriter.ramBytesUsed() + versionMap.ramBytesUsedForRefresh(); @@ -1406,6 +1526,7 @@ final void refresh(String source, SearcherScope scope) throws EngineException { } finally { store.decRef(); } + lastRefreshedCheckpointListener.updateRefreshedCheckpoint(localCheckpointBeforeRefresh); } } catch (AlreadyClosedException e) { failOnTragicEvent(e); @@ -1420,7 +1541,8 @@ final void refresh(String source, SearcherScope scope) throws EngineException { } finally { writingBytes.addAndGet(-bytes); } - + assert lastRefreshedCheckpoint() >= localCheckpointBeforeRefresh : "refresh checkpoint was not advanced; " + + "local_checkpoint=" + localCheckpointBeforeRefresh + " refresh_checkpoint=" + lastRefreshedCheckpoint(); // TODO: maybe we should just put a scheduled job in threadPool? // We check for pruning in each delete request, but we also prune here e.g. in case a delete burst comes in and then no more deletes // for a long time: @@ -2002,7 +2124,11 @@ private IndexWriter createWriter() throws IOException { // pkg-private for testing IndexWriter createWriter(Directory directory, IndexWriterConfig iwc) throws IOException { - return new IndexWriter(directory, iwc); + if (Assertions.ENABLED) { + return new AssertingIndexWriter(directory, iwc); + } else { + return new IndexWriter(directory, iwc); + } } private IndexWriterConfig getIndexWriterConfig() { @@ -2018,11 +2144,15 @@ private IndexWriterConfig getIndexWriterConfig() { } iwc.setInfoStream(verbose ? InfoStream.getDefault() : new LoggerInfoStream(logger)); iwc.setMergeScheduler(mergeScheduler); - MergePolicy mergePolicy = config().getMergePolicy(); // Give us the opportunity to upgrade old segments while performing // background merges - mergePolicy = new ElasticsearchMergePolicy(mergePolicy); - iwc.setMergePolicy(mergePolicy); + MergePolicy mergePolicy = config().getMergePolicy(); + if (softDeleteEnabled) { + iwc.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD); + mergePolicy = new RecoverySourcePruneMergePolicy(SourceFieldMapper.RECOVERY_SOURCE_NAME, softDeletesPolicy::getRetentionQuery, + new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD, softDeletesPolicy::getRetentionQuery, mergePolicy)); + } + iwc.setMergePolicy(new ElasticsearchMergePolicy(mergePolicy)); iwc.setSimilarity(engineConfig.getSimilarity()); iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().getMbFrac()); iwc.setCodec(engineConfig.getCodec()); @@ -2219,6 +2349,9 @@ protected void commitIndexWriter(final IndexWriter writer, final Translog transl commitData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(localCheckpointTracker.getMaxSeqNo())); commitData.put(MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, Long.toString(maxUnsafeAutoIdTimestamp.get())); commitData.put(HISTORY_UUID_KEY, historyUUID); + if (softDeleteEnabled) { + commitData.put(Engine.MIN_RETAINED_SEQNO, Long.toString(softDeletesPolicy.getMinRetainedSeqNo())); + } logger.trace("committing writer with commit data [{}]", commitData); return commitData.entrySet().iterator(); }); @@ -2274,6 +2407,7 @@ public void onSettingsChanged() { final IndexSettings indexSettings = engineConfig.getIndexSettings(); translogDeletionPolicy.setRetentionAgeInMillis(indexSettings.getTranslogRetentionAge().getMillis()); translogDeletionPolicy.setRetentionSizeInBytes(indexSettings.getTranslogRetentionSize().getBytes()); + 
softDeletesPolicy.setRetentionOperations(indexSettings.getSoftDeleteRetentionOperations()); } public MergeStats getMergeStats() { @@ -2368,6 +2502,69 @@ long getNumDocUpdates() { return numDocUpdates.count(); } + @Override + public Translog.Snapshot newChangesSnapshot(String source, MapperService mapperService, + long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws IOException { + // TODO: Should we defer the refresh until we really need it? + ensureOpen(); + if (lastRefreshedCheckpoint() < toSeqNo) { + refresh(source, SearcherScope.INTERNAL); + } + Searcher searcher = acquireSearcher(source, SearcherScope.INTERNAL); + try { + LuceneChangesSnapshot snapshot = new LuceneChangesSnapshot( + searcher, mapperService, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE, fromSeqNo, toSeqNo, requiredFullRange); + searcher = null; + return snapshot; + } catch (Exception e) { + try { + maybeFailEngine("acquire changes snapshot", e); + } catch (Exception inner) { + e.addSuppressed(inner); + } + throw e; + } finally { + IOUtils.close(searcher); + } + } + + @Override + public boolean hasCompleteOperationHistory(String source, MapperService mapperService, long startingSeqNo) throws IOException { + if (engineConfig.getIndexSettings().isSoftDeleteEnabled()) { + return getMinRetainedSeqNo() <= startingSeqNo; + } else { + final long currentLocalCheckpoint = getLocalCheckpointTracker().getCheckpoint(); + final LocalCheckpointTracker tracker = new LocalCheckpointTracker(startingSeqNo, startingSeqNo - 1); + try (Translog.Snapshot snapshot = getTranslog().newSnapshotFromMinSeqNo(startingSeqNo)) { + Translog.Operation operation; + while ((operation = snapshot.next()) != null) { + if (operation.seqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { + tracker.markSeqNoAsCompleted(operation.seqNo()); + } + } + } + return tracker.getCheckpoint() >= currentLocalCheckpoint; + } + } + + /** + * Returns the minimum seqno that is retained in the Lucene index. + * Operations whose seq# are at least this value should exist in the Lucene index. + */ + final long getMinRetainedSeqNo() { + assert softDeleteEnabled : Thread.currentThread().getName(); + return softDeletesPolicy.getMinRetainedSeqNo(); + } + + @Override + public Closeable acquireRetentionLockForPeerRecovery() { + if (softDeleteEnabled) { + return softDeletesPolicy.acquireRetentionLock(); + } else { + return translog.acquireRetentionLock(); + } + } + @Override public boolean isRecovering() { return pendingTranslogRecovery.get(); @@ -2383,4 +2580,69 @@ private static Map commitDataAsMap(final IndexWriter indexWriter } return commitData; } + + private final class AssertingIndexWriter extends IndexWriter { + AssertingIndexWriter(Directory d, IndexWriterConfig conf) throws IOException { + super(d, conf); + } + @Override + public long updateDocument(Term term, Iterable doc) throws IOException { + assert softDeleteEnabled == false : "Call #updateDocument but soft-deletes is enabled"; + return super.updateDocument(term, doc); + } + @Override + public long updateDocuments(Term delTerm, Iterable> docs) throws IOException { + assert softDeleteEnabled == false : "Call #updateDocuments but soft-deletes is enabled"; + return super.updateDocuments(delTerm, docs); + } + @Override + public long deleteDocuments(Term... terms) throws IOException { + assert softDeleteEnabled == false : "Call #deleteDocuments but soft-deletes is enabled"; + return super.deleteDocuments(terms); + } + @Override + public long softUpdateDocument(Term term, Iterable doc, Field... 
softDeletes) throws IOException { + assert softDeleteEnabled : "Call #softUpdateDocument but soft-deletes is disabled"; + return super.softUpdateDocument(term, doc, softDeletes); + } + @Override + public long softUpdateDocuments(Term term, Iterable> docs, Field... softDeletes) throws IOException { + assert softDeleteEnabled : "Call #softUpdateDocuments but soft-deletes is disabled"; + return super.softUpdateDocuments(term, docs, softDeletes); + } + } + + /** + * Returned the last local checkpoint value has been refreshed internally. + */ + final long lastRefreshedCheckpoint() { + return lastRefreshedCheckpointListener.refreshedCheckpoint.get(); + } + + private final class LastRefreshedCheckpointListener implements ReferenceManager.RefreshListener { + final AtomicLong refreshedCheckpoint; + private long pendingCheckpoint; + + LastRefreshedCheckpointListener(long initialLocalCheckpoint) { + this.refreshedCheckpoint = new AtomicLong(initialLocalCheckpoint); + } + + @Override + public void beforeRefresh() { + // all changes until this point should be visible after refresh + pendingCheckpoint = localCheckpointTracker.getCheckpoint(); + } + + @Override + public void afterRefresh(boolean didRefresh) { + if (didRefresh) { + updateRefreshedCheckpoint(pendingCheckpoint); + } + } + + void updateRefreshedCheckpoint(long checkpoint) { + refreshedCheckpoint.updateAndGet(curr -> Math.max(curr, checkpoint)); + assert refreshedCheckpoint.get() >= checkpoint : refreshedCheckpoint.get() + " < " + checkpoint; + } + } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java new file mode 100644 index 0000000000000..14b5c4df81b72 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java @@ -0,0 +1,369 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.engine; + +import org.apache.lucene.document.LongPoint; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.util.ArrayUtil; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.fieldvisitor.FieldsVisitor; +import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.SeqNoFieldMapper; +import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.mapper.VersionFieldMapper; +import org.elasticsearch.index.translog.Translog; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Comparator; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * A {@link Translog.Snapshot} from changes in a Lucene index + */ +final class LuceneChangesSnapshot implements Translog.Snapshot { + static final int DEFAULT_BATCH_SIZE = 1024; + + private final int searchBatchSize; + private final long fromSeqNo, toSeqNo; + private long lastSeenSeqNo; + private int skippedOperations; + private final boolean requiredFullRange; + + private final IndexSearcher indexSearcher; + private final MapperService mapperService; + private int docIndex = 0; + private final int totalHits; + private ScoreDoc[] scoreDocs; + private final ParallelArray parallelArray; + private final Closeable onClose; + + /** + * Creates a new "translog" snapshot from Lucene for reading operations whose seq# in the specified range. 
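Assuming the Engine#newChangesSnapshot method introduced by this change, a caller might drain such a snapshot as sketched below; the snapshot source string, the seq# bounds, and the consumer are placeholders for real replication or rollback logic.

import java.io.IOException;
import java.util.function.Consumer;

import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.translog.Translog;

final class ChangesSnapshotExample {
    /** Drains all operations with seq# in [fromSeqNo, toSeqNo]; the consumer stands in for the actual caller logic. */
    static int drain(Engine engine, MapperService mapperService, long fromSeqNo, long toSeqNo,
                     Consumer<Translog.Operation> consumer) throws IOException {
        int count = 0;
        try (Translog.Snapshot snapshot = engine.newChangesSnapshot("example", mapperService, fromSeqNo, toSeqNo, true)) {
            Translog.Operation op;
            while ((op = snapshot.next()) != null) { // operations are returned in seq# order
                consumer.accept(op);
                count++;
            }
        } // requiredFullRange=true means a gap in the requested seq# range surfaces as an IllegalStateException
        return count;
    }
}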
+ * + * @param engineSearcher the internal engine searcher which will be taken over if the snapshot is opened successfully + * @param mapperService the mapper service which will be mainly used to resolve the document's type and uid + * @param searchBatchSize the number of documents should be returned by each search + * @param fromSeqNo the min requesting seq# - inclusive + * @param toSeqNo the maximum requesting seq# - inclusive + * @param requiredFullRange if true, the snapshot will strictly check for the existence of operations between fromSeqNo and toSeqNo + */ + LuceneChangesSnapshot(Engine.Searcher engineSearcher, MapperService mapperService, int searchBatchSize, + long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws IOException { + if (fromSeqNo < 0 || toSeqNo < 0 || fromSeqNo > toSeqNo) { + throw new IllegalArgumentException("Invalid range; from_seqno [" + fromSeqNo + "], to_seqno [" + toSeqNo + "]"); + } + if (searchBatchSize <= 0) { + throw new IllegalArgumentException("Search_batch_size must be positive [" + searchBatchSize + "]"); + } + final AtomicBoolean closed = new AtomicBoolean(); + this.onClose = () -> { + if (closed.compareAndSet(false, true)) { + IOUtils.close(engineSearcher); + } + }; + this.mapperService = mapperService; + this.searchBatchSize = searchBatchSize; + this.fromSeqNo = fromSeqNo; + this.toSeqNo = toSeqNo; + this.lastSeenSeqNo = fromSeqNo - 1; + this.requiredFullRange = requiredFullRange; + this.indexSearcher = new IndexSearcher(Lucene.wrapAllDocsLive(engineSearcher.getDirectoryReader())); + this.indexSearcher.setQueryCache(null); + this.parallelArray = new ParallelArray(searchBatchSize); + final TopDocs topDocs = searchOperations(null); + this.totalHits = Math.toIntExact(topDocs.totalHits); + this.scoreDocs = topDocs.scoreDocs; + fillParallelArray(scoreDocs, parallelArray); + } + + @Override + public void close() throws IOException { + onClose.close(); + } + + @Override + public int totalOperations() { + return totalHits; + } + + @Override + public int skippedOperations() { + return skippedOperations; + } + + @Override + public Translog.Operation next() throws IOException { + Translog.Operation op = null; + for (int idx = nextDocIndex(); idx != -1; idx = nextDocIndex()) { + op = readDocAsOp(idx); + if (op != null) { + break; + } + } + if (requiredFullRange) { + rangeCheck(op); + } + if (op != null) { + lastSeenSeqNo = op.seqNo(); + } + return op; + } + + private void rangeCheck(Translog.Operation op) { + if (op == null) { + if (lastSeenSeqNo < toSeqNo) { + throw new IllegalStateException("Not all operations between from_seqno [" + fromSeqNo + "] " + + "and to_seqno [" + toSeqNo + "] found; prematurely terminated last_seen_seqno [" + lastSeenSeqNo + "]"); + } + } else { + final long expectedSeqNo = lastSeenSeqNo + 1; + if (op.seqNo() != expectedSeqNo) { + throw new IllegalStateException("Not all operations between from_seqno [" + fromSeqNo + "] " + + "and to_seqno [" + toSeqNo + "] found; expected seqno [" + expectedSeqNo + "]; found [" + op + "]"); + } + } + } + + private int nextDocIndex() throws IOException { + // we have processed all docs in the current search - fetch the next batch + if (docIndex == scoreDocs.length && docIndex > 0) { + final ScoreDoc prev = scoreDocs[scoreDocs.length - 1]; + scoreDocs = searchOperations(prev).scoreDocs; + fillParallelArray(scoreDocs, parallelArray); + docIndex = 0; + } + if (docIndex < scoreDocs.length) { + int idx = docIndex; + docIndex++; + return idx; + } + return -1; + } + + private void 
fillParallelArray(ScoreDoc[] scoreDocs, ParallelArray parallelArray) throws IOException { + if (scoreDocs.length > 0) { + for (int i = 0; i < scoreDocs.length; i++) { + scoreDocs[i].shardIndex = i; + } + // for better loading performance we sort the array by docID and + // then visit all leaves in order. + ArrayUtil.introSort(scoreDocs, Comparator.comparingInt(i -> i.doc)); + int docBase = -1; + int maxDoc = 0; + List leaves = indexSearcher.getIndexReader().leaves(); + int readerIndex = 0; + CombinedDocValues combinedDocValues = null; + LeafReaderContext leaf = null; + for (int i = 0; i < scoreDocs.length; i++) { + ScoreDoc scoreDoc = scoreDocs[i]; + if (scoreDoc.doc >= docBase + maxDoc) { + do { + leaf = leaves.get(readerIndex++); + docBase = leaf.docBase; + maxDoc = leaf.reader().maxDoc(); + } while (scoreDoc.doc >= docBase + maxDoc); + combinedDocValues = new CombinedDocValues(leaf.reader()); + } + final int segmentDocID = scoreDoc.doc - docBase; + final int index = scoreDoc.shardIndex; + parallelArray.leafReaderContexts[index] = leaf; + parallelArray.seqNo[index] = combinedDocValues.docSeqNo(segmentDocID); + parallelArray.primaryTerm[index] = combinedDocValues.docPrimaryTerm(segmentDocID); + parallelArray.version[index] = combinedDocValues.docVersion(segmentDocID); + parallelArray.isTombStone[index] = combinedDocValues.isTombstone(segmentDocID); + parallelArray.hasRecoverySource[index] = combinedDocValues.hasRecoverySource(segmentDocID); + } + // now sort back based on the shardIndex. we use this to store the previous index + ArrayUtil.introSort(scoreDocs, Comparator.comparingInt(i -> i.shardIndex)); + } + } + + private TopDocs searchOperations(ScoreDoc after) throws IOException { + final Query rangeQuery = LongPoint.newRangeQuery(SeqNoFieldMapper.NAME, lastSeenSeqNo + 1, toSeqNo); + final Sort sortedBySeqNoThenByTerm = new Sort( + new SortField(SeqNoFieldMapper.NAME, SortField.Type.LONG), + new SortField(SeqNoFieldMapper.PRIMARY_TERM_NAME, SortField.Type.LONG, true) + ); + return indexSearcher.searchAfter(after, rangeQuery, searchBatchSize, sortedBySeqNoThenByTerm); + } + + private Translog.Operation readDocAsOp(int docIndex) throws IOException { + final LeafReaderContext leaf = parallelArray.leafReaderContexts[docIndex]; + final int segmentDocID = scoreDocs[docIndex].doc - leaf.docBase; + final long primaryTerm = parallelArray.primaryTerm[docIndex]; + // We don't have to read the nested child documents - those docs don't have primary terms. + if (primaryTerm == -1) { + skippedOperations++; + return null; + } + final long seqNo = parallelArray.seqNo[docIndex]; + // Only pick the first seen seq# + if (seqNo == lastSeenSeqNo) { + skippedOperations++; + return null; + } + final long version = parallelArray.version[docIndex]; + final String sourceField = parallelArray.hasRecoverySource[docIndex] ? 
SourceFieldMapper.RECOVERY_SOURCE_NAME : + SourceFieldMapper.NAME; + final FieldsVisitor fields = new FieldsVisitor(true, sourceField); + leaf.reader().document(segmentDocID, fields); + fields.postProcess(mapperService); + + final Translog.Operation op; + final boolean isTombstone = parallelArray.isTombStone[docIndex]; + if (isTombstone && fields.uid() == null) { + op = new Translog.NoOp(seqNo, primaryTerm, fields.source().utf8ToString()); + assert version == 1L : "Noop tombstone should have version 1L; actual version [" + version + "]"; + assert assertDocSoftDeleted(leaf.reader(), segmentDocID) : "Noop but soft_deletes field is not set [" + op + "]"; + } else { + final String id = fields.uid().id(); + final String type = fields.uid().type(); + final Term uid = new Term(IdFieldMapper.NAME, Uid.encodeId(id)); + if (isTombstone) { + op = new Translog.Delete(type, id, uid, seqNo, primaryTerm, version, VersionType.EXTERNAL); + assert assertDocSoftDeleted(leaf.reader(), segmentDocID) : "Delete op but soft_deletes field is not set [" + op + "]"; + } else { + final BytesReference source = fields.source(); + if (source == null) { + // TODO: Callers should ask for the range that source should be retained. Thus we should always + // check for the existence source once we make peer-recovery to send ops after the local checkpoint. + if (requiredFullRange) { + throw new IllegalStateException("source not found for seqno=" + seqNo + + " from_seqno=" + fromSeqNo + " to_seqno=" + toSeqNo); + } else { + skippedOperations++; + return null; + } + } + // TODO: pass the latest timestamp from engine. + final long autoGeneratedIdTimestamp = -1; + op = new Translog.Index(type, id, seqNo, primaryTerm, version, VersionType.EXTERNAL, + source.toBytesRef().bytes, fields.routing(), null, autoGeneratedIdTimestamp); + } + } + assert fromSeqNo <= op.seqNo() && op.seqNo() <= toSeqNo && lastSeenSeqNo < op.seqNo() : "Unexpected operation; " + + "last_seen_seqno [" + lastSeenSeqNo + "], from_seqno [" + fromSeqNo + "], to_seqno [" + toSeqNo + "], op [" + op + "]"; + return op; + } + + private boolean assertDocSoftDeleted(LeafReader leafReader, int segmentDocId) throws IOException { + final NumericDocValues ndv = leafReader.getNumericDocValues(Lucene.SOFT_DELETES_FIELD); + if (ndv == null || ndv.advanceExact(segmentDocId) == false) { + throw new IllegalStateException("DocValues for field [" + Lucene.SOFT_DELETES_FIELD + "] is not found"); + } + return ndv.longValue() == 1; + } + + private static final class ParallelArray { + final LeafReaderContext[] leafReaderContexts; + final long[] version; + final long[] seqNo; + final long[] primaryTerm; + final boolean[] isTombStone; + final boolean[] hasRecoverySource; + + ParallelArray(int size) { + version = new long[size]; + seqNo = new long[size]; + primaryTerm = new long[size]; + isTombStone = new boolean[size]; + hasRecoverySource = new boolean[size]; + leafReaderContexts = new LeafReaderContext[size]; + } + } + + private static final class CombinedDocValues { + private final NumericDocValues versionDV; + private final NumericDocValues seqNoDV; + private final NumericDocValues primaryTermDV; + private final NumericDocValues tombstoneDV; + private final NumericDocValues recoverySource; + + CombinedDocValues(LeafReader leafReader) throws IOException { + this.versionDV = Objects.requireNonNull(leafReader.getNumericDocValues(VersionFieldMapper.NAME), "VersionDV is missing"); + this.seqNoDV = Objects.requireNonNull(leafReader.getNumericDocValues(SeqNoFieldMapper.NAME), "SeqNoDV is 
missing"); + this.primaryTermDV = Objects.requireNonNull( + leafReader.getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME), "PrimaryTermDV is missing"); + this.tombstoneDV = leafReader.getNumericDocValues(SeqNoFieldMapper.TOMBSTONE_NAME); + this.recoverySource = leafReader.getNumericDocValues(SourceFieldMapper.RECOVERY_SOURCE_NAME); + } + + long docVersion(int segmentDocId) throws IOException { + assert versionDV.docID() < segmentDocId; + if (versionDV.advanceExact(segmentDocId) == false) { + throw new IllegalStateException("DocValues for field [" + VersionFieldMapper.NAME + "] is not found"); + } + return versionDV.longValue(); + } + + long docSeqNo(int segmentDocId) throws IOException { + assert seqNoDV.docID() < segmentDocId; + if (seqNoDV.advanceExact(segmentDocId) == false) { + throw new IllegalStateException("DocValues for field [" + SeqNoFieldMapper.NAME + "] is not found"); + } + return seqNoDV.longValue(); + } + + long docPrimaryTerm(int segmentDocId) throws IOException { + if (primaryTermDV == null) { + return -1L; + } + assert primaryTermDV.docID() < segmentDocId; + // Use -1 for docs which don't have primary term. The caller considers those docs as nested docs. + if (primaryTermDV.advanceExact(segmentDocId) == false) { + return -1; + } + return primaryTermDV.longValue(); + } + + boolean isTombstone(int segmentDocId) throws IOException { + if (tombstoneDV == null) { + return false; + } + assert tombstoneDV.docID() < segmentDocId; + return tombstoneDV.advanceExact(segmentDocId) && tombstoneDV.longValue() > 0; + } + + boolean hasRecoverySource(int segmentDocId) throws IOException { + if (recoverySource == null) { + return false; + } + assert recoverySource.docID() < segmentDocId; + return recoverySource.advanceExact(segmentDocId); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java b/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java new file mode 100644 index 0000000000000..fde97562de8f8 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java @@ -0,0 +1,292 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.engine; + +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.codecs.StoredFieldsReader; +import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.index.CodecReader; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FilterCodecReader; +import org.apache.lucene.index.FilterNumericDocValues; +import org.apache.lucene.index.MergePolicy; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.OneMergeWrappingMergePolicy; +import org.apache.lucene.index.SortedDocValues; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.StoredFieldVisitor; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.ConjunctionDISI; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.DocValuesFieldExistsQuery; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; +import org.apache.lucene.util.BitSet; +import org.apache.lucene.util.BitSetIterator; + +import java.io.IOException; +import java.util.Arrays; +import java.util.function.Supplier; + +final class RecoverySourcePruneMergePolicy extends OneMergeWrappingMergePolicy { + RecoverySourcePruneMergePolicy(String recoverySourceField, Supplier retainSourceQuerySupplier, MergePolicy in) { + super(in, toWrap -> new OneMerge(toWrap.segments) { + @Override + public CodecReader wrapForMerge(CodecReader reader) throws IOException { + CodecReader wrapped = toWrap.wrapForMerge(reader); + return wrapReader(recoverySourceField, wrapped, retainSourceQuerySupplier); + } + }); + } + + // pkg private for testing + static CodecReader wrapReader(String recoverySourceField, CodecReader reader, Supplier retainSourceQuerySupplier) + throws IOException { + NumericDocValues recoverySource = reader.getNumericDocValues(recoverySourceField); + if (recoverySource == null || recoverySource.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) { + return reader; // early terminate - nothing to do here since non of the docs has a recovery source anymore. 
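For orientation, the pruning here assumes that _recovery_source appears in two shapes on a document: the source bytes in a stored field, and a numeric doc-values entry under the same name that acts as an existence marker for DocValuesFieldExistsQuery and for the merge-time filtering. A sketch of that shape, with the field name mirroring SourceFieldMapper.RECOVERY_SOURCE_NAME:

import org.apache.lucene.document.Document;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.search.DocValuesFieldExistsQuery;
import org.apache.lucene.search.Query;

final class RecoverySourceSketch {
    static final String RECOVERY_SOURCE = "_recovery_source"; // mirrors SourceFieldMapper.RECOVERY_SOURCE_NAME

    /** Attaches a prunable copy of the source: stored bytes plus a doc-values existence marker. */
    static void addRecoverySource(Document doc, byte[] sourceBytes) {
        doc.add(new StoredField(RECOVERY_SOURCE, sourceBytes));
        doc.add(new NumericDocValuesField(RECOVERY_SOURCE, 1));
    }

    /** Matches documents whose recovery source has not been pruned away yet. */
    static Query hasRecoverySource() {
        return new DocValuesFieldExistsQuery(RECOVERY_SOURCE);
    }
}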
+ } + BooleanQuery.Builder builder = new BooleanQuery.Builder(); + builder.add(new DocValuesFieldExistsQuery(recoverySourceField), BooleanClause.Occur.FILTER); + builder.add(retainSourceQuerySupplier.get(), BooleanClause.Occur.FILTER); + IndexSearcher s = new IndexSearcher(reader); + s.setQueryCache(null); + Weight weight = s.createWeight(s.rewrite(builder.build()), false, 1.0f); + Scorer scorer = weight.scorer(reader.getContext()); + if (scorer != null) { + return new SourcePruningFilterCodecReader(recoverySourceField, reader, BitSet.of(scorer.iterator(), reader.maxDoc())); + } else { + return new SourcePruningFilterCodecReader(recoverySourceField, reader, null); + } + } + + private static class SourcePruningFilterCodecReader extends FilterCodecReader { + private final BitSet recoverySourceToKeep; + private final String recoverySourceField; + + SourcePruningFilterCodecReader(String recoverySourceField, CodecReader reader, BitSet recoverySourceToKeep) { + super(reader); + this.recoverySourceField = recoverySourceField; + this.recoverySourceToKeep = recoverySourceToKeep; + } + + @Override + public DocValuesProducer getDocValuesReader() { + DocValuesProducer docValuesReader = super.getDocValuesReader(); + return new FilterDocValuesProducer(docValuesReader) { + @Override + public NumericDocValues getNumeric(FieldInfo field) throws IOException { + NumericDocValues numeric = super.getNumeric(field); + if (recoverySourceField.equals(field.name)) { + assert numeric != null : recoverySourceField + " must have numeric DV but was null"; + final DocIdSetIterator intersection; + if (recoverySourceToKeep == null) { + // we can't return null here lucenes DocIdMerger expects an instance + intersection = DocIdSetIterator.empty(); + } else { + intersection = ConjunctionDISI.intersectIterators(Arrays.asList(numeric, + new BitSetIterator(recoverySourceToKeep, recoverySourceToKeep.length()))); + } + return new FilterNumericDocValues(numeric) { + @Override + public int nextDoc() throws IOException { + return intersection.nextDoc(); + } + + @Override + public int advance(int target) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean advanceExact(int target) { + throw new UnsupportedOperationException(); + } + }; + + } + return numeric; + } + }; + } + + @Override + public StoredFieldsReader getFieldsReader() { + StoredFieldsReader fieldsReader = super.getFieldsReader(); + return new FilterStoredFieldsReader(fieldsReader) { + @Override + public void visitDocument(int docID, StoredFieldVisitor visitor) throws IOException { + if (recoverySourceToKeep != null && recoverySourceToKeep.get(docID)) { + super.visitDocument(docID, visitor); + } else { + super.visitDocument(docID, new FilterStoredFieldVisitor(visitor) { + @Override + public Status needsField(FieldInfo fieldInfo) throws IOException { + if (recoverySourceField.equals(fieldInfo.name)) { + return Status.NO; + } + return super.needsField(fieldInfo); + } + }); + } + } + }; + } + + @Override + public CacheHelper getCoreCacheHelper() { + return null; + } + + @Override + public CacheHelper getReaderCacheHelper() { + return null; + } + + private static class FilterDocValuesProducer extends DocValuesProducer { + private final DocValuesProducer in; + + FilterDocValuesProducer(DocValuesProducer in) { + this.in = in; + } + + @Override + public NumericDocValues getNumeric(FieldInfo field) throws IOException { + return in.getNumeric(field); + } + + @Override + public BinaryDocValues getBinary(FieldInfo field) throws IOException { + return 
in.getBinary(field); + } + + @Override + public SortedDocValues getSorted(FieldInfo field) throws IOException { + return in.getSorted(field); + } + + @Override + public SortedNumericDocValues getSortedNumeric(FieldInfo field) throws IOException { + return in.getSortedNumeric(field); + } + + @Override + public SortedSetDocValues getSortedSet(FieldInfo field) throws IOException { + return in.getSortedSet(field); + } + + @Override + public void checkIntegrity() throws IOException { + in.checkIntegrity(); + } + + @Override + public void close() throws IOException { + in.close(); + } + + @Override + public long ramBytesUsed() { + return in.ramBytesUsed(); + } + } + + private static class FilterStoredFieldsReader extends StoredFieldsReader { + + private final StoredFieldsReader fieldsReader; + + FilterStoredFieldsReader(StoredFieldsReader fieldsReader) { + this.fieldsReader = fieldsReader; + } + + @Override + public long ramBytesUsed() { + return fieldsReader.ramBytesUsed(); + } + + @Override + public void close() throws IOException { + fieldsReader.close(); + } + + @Override + public void visitDocument(int docID, StoredFieldVisitor visitor) throws IOException { + fieldsReader.visitDocument(docID, visitor); + } + + @Override + public StoredFieldsReader clone() { + return fieldsReader.clone(); + } + + @Override + public void checkIntegrity() throws IOException { + fieldsReader.checkIntegrity(); + } + } + + private static class FilterStoredFieldVisitor extends StoredFieldVisitor { + private final StoredFieldVisitor visitor; + + FilterStoredFieldVisitor(StoredFieldVisitor visitor) { + this.visitor = visitor; + } + + @Override + public void binaryField(FieldInfo fieldInfo, byte[] value) throws IOException { + visitor.binaryField(fieldInfo, value); + } + + @Override + public void stringField(FieldInfo fieldInfo, byte[] value) throws IOException { + visitor.stringField(fieldInfo, value); + } + + @Override + public void intField(FieldInfo fieldInfo, int value) throws IOException { + visitor.intField(fieldInfo, value); + } + + @Override + public void longField(FieldInfo fieldInfo, long value) throws IOException { + visitor.longField(fieldInfo, value); + } + + @Override + public void floatField(FieldInfo fieldInfo, float value) throws IOException { + visitor.floatField(fieldInfo, value); + } + + @Override + public void doubleField(FieldInfo fieldInfo, double value) throws IOException { + visitor.doubleField(fieldInfo, value); + } + + @Override + public Status needsField(FieldInfo fieldInfo) throws IOException { + return visitor.needsField(fieldInfo); + } + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/engine/SoftDeletesPolicy.java b/server/src/main/java/org/elasticsearch/index/engine/SoftDeletesPolicy.java new file mode 100644 index 0000000000000..af2ded8c46620 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/engine/SoftDeletesPolicy.java @@ -0,0 +1,120 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.engine; + +import org.apache.lucene.document.LongPoint; +import org.apache.lucene.search.Query; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.index.mapper.SeqNoFieldMapper; +import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.index.translog.Translog; + +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.LongSupplier; + +/** + * A policy that controls how many soft-deleted documents should be retained for peer-recovery and for querying history changes. + */ +final class SoftDeletesPolicy { + private final LongSupplier globalCheckpointSupplier; + private long localCheckpointOfSafeCommit; + // This lock count is used to prevent `minRetainedSeqNo` from advancing. + private int retentionLockCount; + // The number of extra operations before the global checkpoint that are retained + private long retentionOperations; + // The min seq_no value that is retained - ops at or after this seq# should exist in the Lucene index. + private long minRetainedSeqNo; + + SoftDeletesPolicy(LongSupplier globalCheckpointSupplier, long minRetainedSeqNo, long retentionOperations) { + this.globalCheckpointSupplier = globalCheckpointSupplier; + this.retentionOperations = retentionOperations; + this.minRetainedSeqNo = minRetainedSeqNo; + this.localCheckpointOfSafeCommit = SequenceNumbers.NO_OPS_PERFORMED; + this.retentionLockCount = 0; + } + + /** + * Updates the number of soft-deleted documents prior to the global checkpoint to be retained. + * See {@link org.elasticsearch.index.IndexSettings#INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING} + */ + synchronized void setRetentionOperations(long retentionOperations) { + this.retentionOperations = retentionOperations; + } + + /** + * Sets the local checkpoint of the current safe commit + */ + synchronized void setLocalCheckpointOfSafeCommit(long newCheckpoint) { + if (newCheckpoint < this.localCheckpointOfSafeCommit) { + throw new IllegalArgumentException("Local checkpoint can't go backwards; " + + "new checkpoint [" + newCheckpoint + "], " + "current checkpoint [" + localCheckpointOfSafeCommit + "]"); + } + this.localCheckpointOfSafeCommit = newCheckpoint; + } + + /** + * Acquires a lock on soft-deleted documents to prevent them from being cleaned up by merge processes. This is necessary to + * make sure that all operations that are being retained will be retained until the lock is released.
+ * This is analogous to the translog's retention lock; see {@link Translog#acquireRetentionLock()} + */ + synchronized Releasable acquireRetentionLock() { + assert retentionLockCount >= 0 : "Invalid number of retention locks [" + retentionLockCount + "]"; + retentionLockCount++; + final AtomicBoolean released = new AtomicBoolean(); + return () -> { + if (released.compareAndSet(false, true)) { + releaseRetentionLock(); + } + }; + } + + private synchronized void releaseRetentionLock() { + assert retentionLockCount > 0 : "Invalid number of retention locks [" + retentionLockCount + "]"; + retentionLockCount--; + } + + /** + * Returns the min seqno that is retained in the Lucene index. + * Operations whose seq# is at least this value should exist in the Lucene index. + */ + synchronized long getMinRetainedSeqNo() { + // Do not advance if the retention lock is held + if (retentionLockCount == 0) { + // This policy retains operations for two purposes: peer-recovery and querying changes history. + // - Peer-recovery is driven by the local checkpoint of the safe commit. In peer-recovery, the primary transfers a safe commit, + // then sends ops after the local checkpoint of that commit. This requires keeping all ops after localCheckpointOfSafeCommit; + // - Changes APIs are driven by the combination of the global checkpoint and retention ops. Here we prefer using the global + // checkpoint instead of max_seqno because only operations up to the global checkpoint are exposed in the changes APIs. + final long minSeqNoForQueryingChanges = globalCheckpointSupplier.getAsLong() - retentionOperations; + final long minSeqNoToRetain = Math.min(minSeqNoForQueryingChanges, localCheckpointOfSafeCommit) + 1; + // This can go backward as the retentionOperations value can be changed in settings. + minRetainedSeqNo = Math.max(minRetainedSeqNo, minSeqNoToRetain); + } + return minRetainedSeqNo; + } + + /** + * Returns a soft-deletes retention query that will be used in {@link org.apache.lucene.index.SoftDeletesRetentionMergePolicy} + * Documents (including tombstones) that are soft-deleted and match this query will be retained and won't be cleaned up by merges.
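A small worked example of the retention arithmetic and of the intended locking pattern; the numbers are illustrative and the caller is assumed to sit in the same package as SoftDeletesPolicy, since the relevant methods are package-private.

import org.elasticsearch.common.lease.Releasable;

final class SoftDeletesPolicyExample {
    /** With global checkpoint 100, retention ops 30 and safe-commit local checkpoint 85: min(100 - 30, 85) + 1 = 71. */
    static long minSeqNoToRetain(long globalCheckpoint, long retentionOperations, long localCheckpointOfSafeCommit) {
        final long minSeqNoForQueryingChanges = globalCheckpoint - retentionOperations;
        return Math.min(minSeqNoForQueryingChanges, localCheckpointOfSafeCommit) + 1;
    }

    /** Hold the retention lock so the retained range cannot shrink while operations are being copied. */
    static void copyRetainedOps(SoftDeletesPolicy policy) {
        try (Releasable ignored = policy.acquireRetentionLock()) {
            final long retained = policy.getMinRetainedSeqNo(); // will not advance while the lock is held
            // ... read operations with seq# >= retained ...
        }
    }
}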
+ */ + Query getRetentionQuery() { + return LongPoint.newRangeQuery(SeqNoFieldMapper.NAME, getMinRetainedSeqNo(), Long.MAX_VALUE); + } +} diff --git a/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java b/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java index 6eae8bdb1668d..0e5d8786984c8 100644 --- a/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java +++ b/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java @@ -58,13 +58,19 @@ public class FieldsVisitor extends StoredFieldVisitor { ParentFieldMapper.NAME)); private final boolean loadSource; + private final String sourceFieldName; private final Set requiredFields; protected BytesReference source; protected String type, id; protected Map> fieldsValues; public FieldsVisitor(boolean loadSource) { + this(loadSource, SourceFieldMapper.NAME); + } + + public FieldsVisitor(boolean loadSource, String sourceFieldName) { this.loadSource = loadSource; + this.sourceFieldName = sourceFieldName; requiredFields = new HashSet<>(); reset(); } @@ -110,7 +116,7 @@ public void postProcess(MapperService mapperService) { @Override public void binaryField(FieldInfo fieldInfo, byte[] value) throws IOException { - if (SourceFieldMapper.NAME.equals(fieldInfo.name)) { + if (sourceFieldName.equals(fieldInfo.name)) { source = new BytesArray(value); } else if (IdFieldMapper.NAME.equals(fieldInfo.name)) { id = Uid.decodeId(value); @@ -194,7 +200,7 @@ public void reset() { requiredFields.addAll(BASE_REQUIRED_FIELDS); if (loadSource) { - requiredFields.add(SourceFieldMapper.NAME); + requiredFields.add(sourceFieldName); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index c9da7397a8402..2d3cd655d28ff 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -19,11 +19,14 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.document.StoredField; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Query; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchGenerationException; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.Text; @@ -39,12 +42,15 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.stream.Stream; public class DocumentMapper implements ToXContentFragment { @@ -121,6 +127,8 @@ public DocumentMapper build(MapperService mapperService) { private final Map objectMappers; private final boolean hasNestedObjects; + private final MetadataFieldMapper[] deleteTombstoneMetadataFieldMappers; + private final MetadataFieldMapper[] noopTombstoneMetadataFieldMappers; public DocumentMapper(MapperService mapperService, Mapping mapping) { this.mapperService = mapperService; @@ -176,6 +184,15 @@ public DocumentMapper(MapperService mapperService, Mapping mapping) { } catch (Exception e) { throw new 
ElasticsearchGenerationException("failed to serialize source for type [" + type + "]", e); } + + final Collection deleteTombstoneMetadataFields = Arrays.asList(VersionFieldMapper.NAME, IdFieldMapper.NAME, + TypeFieldMapper.NAME, SeqNoFieldMapper.NAME, SeqNoFieldMapper.PRIMARY_TERM_NAME, SeqNoFieldMapper.TOMBSTONE_NAME); + this.deleteTombstoneMetadataFieldMappers = Stream.of(mapping.metadataMappers) + .filter(field -> deleteTombstoneMetadataFields.contains(field.name())).toArray(MetadataFieldMapper[]::new); + final Collection noopTombstoneMetadataFields = Arrays.asList( + VersionFieldMapper.NAME, SeqNoFieldMapper.NAME, SeqNoFieldMapper.PRIMARY_TERM_NAME, SeqNoFieldMapper.TOMBSTONE_NAME); + this.noopTombstoneMetadataFieldMappers = Stream.of(mapping.metadataMappers) + .filter(field -> noopTombstoneMetadataFields.contains(field.name())).toArray(MetadataFieldMapper[]::new); } public Mapping mapping() { @@ -260,7 +277,22 @@ public Map objectMappers() { } public ParsedDocument parse(SourceToParse source) throws MapperParsingException { - return documentParser.parseDocument(source); + return documentParser.parseDocument(source, mapping.metadataMappers); + } + + public ParsedDocument createDeleteTombstoneDoc(String index, String type, String id) throws MapperParsingException { + final SourceToParse emptySource = SourceToParse.source(index, type, id, new BytesArray("{}"), XContentType.JSON); + return documentParser.parseDocument(emptySource, deleteTombstoneMetadataFieldMappers).toTombstone(); + } + + public ParsedDocument createNoopTombstoneDoc(String index, String reason) throws MapperParsingException { + final String id = ""; // _id won't be used. + final SourceToParse sourceToParse = SourceToParse.source(index, type, id, new BytesArray("{}"), XContentType.JSON); + final ParsedDocument parsedDoc = documentParser.parseDocument(sourceToParse, noopTombstoneMetadataFieldMappers).toTombstone(); + // Store the reason of a noop as a raw string in the _source field + final BytesRef byteRef = new BytesRef(reason); + parsedDoc.rootDoc().add(new StoredField(SourceFieldMapper.NAME, byteRef.bytes, byteRef.offset, byteRef.length)); + return parsedDoc; } /** diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 4556eb793199c..368d8536a5853 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -55,7 +55,7 @@ final class DocumentParser { this.docMapper = docMapper; } - ParsedDocument parseDocument(SourceToParse source) throws MapperParsingException { + ParsedDocument parseDocument(SourceToParse source, MetadataFieldMapper[] metadataFieldsMappers) throws MapperParsingException { validateType(source); final Mapping mapping = docMapper.mapping(); @@ -64,9 +64,9 @@ ParsedDocument parseDocument(SourceToParse source) throws MapperParsingException try (XContentParser parser = XContentHelper.createParser(docMapperParser.getXContentRegistry(), LoggingDeprecationHandler.INSTANCE, source.source(), xContentType)) { - context = new ParseContext.InternalParseContext(indexSettings.getSettings(), docMapperParser, docMapper, source, parser); + context = new ParseContext.InternalParseContext(indexSettings, docMapperParser, docMapper, source, parser); validateStart(parser); - internalParseDocument(mapping, context, parser); + internalParseDocument(mapping, metadataFieldsMappers, context, parser); 
validateEnd(parser); } catch (Exception e) { throw wrapInMapperParsingException(source, e); @@ -81,10 +81,11 @@ ParsedDocument parseDocument(SourceToParse source) throws MapperParsingException return parsedDocument(source, context, createDynamicUpdate(mapping, docMapper, context.getDynamicMappers())); } - private static void internalParseDocument(Mapping mapping, ParseContext.InternalParseContext context, XContentParser parser) throws IOException { + private static void internalParseDocument(Mapping mapping, MetadataFieldMapper[] metadataFieldsMappers, + ParseContext.InternalParseContext context, XContentParser parser) throws IOException { final boolean emptyDoc = isEmptyDoc(mapping, parser); - for (MetadataFieldMapper metadataMapper : mapping.metadataMappers) { + for (MetadataFieldMapper metadataMapper : metadataFieldsMappers) { metadataMapper.preParse(context); } @@ -95,7 +96,7 @@ private static void internalParseDocument(Mapping mapping, ParseContext.Internal parseObjectOrNested(context, mapping.root); } - for (MetadataFieldMapper metadataMapper : mapping.metadataMappers) { + for (MetadataFieldMapper metadataMapper : metadataFieldsMappers) { metadataMapper.postParse(context); } } @@ -512,7 +513,7 @@ private static void parseObject(final ParseContext context, ObjectMapper mapper, if (builder == null) { builder = new ObjectMapper.Builder(currentFieldName).enabled(true); } - Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); + Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings().getSettings(), context.path()); objectMapper = builder.build(builderContext); context.addDynamicMapper(objectMapper); context.path().add(currentFieldName); @@ -555,7 +556,7 @@ private static void parseArray(ParseContext context, ObjectMapper parentMapper, if (builder == null) { parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); } else { - Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); + Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings().getSettings(), context.path()); mapper = builder.build(builderContext); assert mapper != null; if (mapper instanceof ArrayValueMapperParser) { @@ -713,13 +714,13 @@ private static Mapper.Builder createBuilderFromDynamicValue(final ParseCont if (parseableAsLong && context.root().numericDetection()) { Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, XContentFieldType.LONG); if (builder == null) { - builder = newLongBuilder(currentFieldName, Version.indexCreated(context.indexSettings())); + builder = newLongBuilder(currentFieldName, context.indexSettings().getIndexVersionCreated()); } return builder; } else if (parseableAsDouble && context.root().numericDetection()) { Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, XContentFieldType.DOUBLE); if (builder == null) { - builder = newFloatBuilder(currentFieldName, Version.indexCreated(context.indexSettings())); + builder = newFloatBuilder(currentFieldName, context.indexSettings().getIndexVersionCreated()); } return builder; } else if (parseableAsLong == false && parseableAsDouble == false && context.root().dateDetection()) { @@ -735,7 +736,7 @@ private static Mapper.Builder createBuilderFromDynamicValue(final ParseCont } Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, XContentFieldType.DATE); if (builder == null) { 
- builder = newDateBuilder(currentFieldName, dateTimeFormatter, Version.indexCreated(context.indexSettings())); + builder = newDateBuilder(currentFieldName, dateTimeFormatter, context.indexSettings().getIndexVersionCreated()); } if (builder instanceof DateFieldMapper.Builder) { DateFieldMapper.Builder dateBuilder = (DateFieldMapper.Builder) builder; @@ -758,7 +759,7 @@ private static Mapper.Builder createBuilderFromDynamicValue(final ParseCont if (numberType == XContentParser.NumberType.INT || numberType == XContentParser.NumberType.LONG) { Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, XContentFieldType.LONG); if (builder == null) { - builder = newLongBuilder(currentFieldName, Version.indexCreated(context.indexSettings())); + builder = newLongBuilder(currentFieldName, context.indexSettings().getIndexVersionCreated()); } return builder; } else if (numberType == XContentParser.NumberType.FLOAT || numberType == XContentParser.NumberType.DOUBLE) { @@ -767,7 +768,7 @@ private static Mapper.Builder createBuilderFromDynamicValue(final ParseCont // no templates are defined, we use float by default instead of double // since this is much more space-efficient and should be enough most of // the time - builder = newFloatBuilder(currentFieldName, Version.indexCreated(context.indexSettings())); + builder = newFloatBuilder(currentFieldName, context.indexSettings().getIndexVersionCreated()); } return builder; } @@ -802,7 +803,7 @@ private static void parseDynamicValue(final ParseContext context, ObjectMapper p return; } final String path = context.path().pathAsText(currentFieldName); - final Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); + final Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings().getSettings(), context.path()); final MappedFieldType existingFieldType = context.mapperService().fullName(path); final Mapper.Builder builder; if (existingFieldType != null) { @@ -900,8 +901,8 @@ private static Tuple getDynamicParentMapper(ParseContext if (builder == null) { builder = new ObjectMapper.Builder(paths[i]).enabled(true); } - Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); - mapper = (ObjectMapper) builder.build(builderContext); + Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings().getSettings(), + context.path()); mapper = (ObjectMapper) builder.build(builderContext); if (mapper.nested() != ObjectMapper.Nested.NO) { throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().pathAsText(paths[i]) + "]) through `copy_to` or dots in field names"); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java index 3268b94207547..087269b7ec9c7 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java @@ -24,7 +24,6 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.Query; import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.lucene.Lucene; @@ -215,12 +214,12 @@ public FieldNamesFieldType 
fieldType() { } @Override - public void preParse(ParseContext context) throws IOException { + public void preParse(ParseContext context) { } @Override public void postParse(ParseContext context) throws IOException { - if (context.indexSettings().getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).before(Version.V_6_1_0)) { + if (context.indexSettings().getIndexVersionCreated().before(Version.V_6_1_0)) { super.parse(context); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ParseContext.java b/server/src/main/java/org/elasticsearch/index/mapper/ParseContext.java index d3132d4da74ec..693dae6033af8 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ParseContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ParseContext.java @@ -25,10 +25,9 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lucene.all.AllEntries; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.IndexSettings; import java.util.ArrayList; import java.util.Collection; @@ -198,7 +197,7 @@ public boolean isWithinMultiFields() { } @Override - public Settings indexSettings() { + public IndexSettings indexSettings() { return in.indexSettings(); } @@ -322,8 +321,7 @@ public static class InternalParseContext extends ParseContext { private final List documents; - @Nullable - private final Settings indexSettings; + private final IndexSettings indexSettings; private final SourceToParse sourceToParse; @@ -339,8 +337,8 @@ public static class InternalParseContext extends ParseContext { private final Set ignoredFields = new HashSet<>(); - public InternalParseContext(@Nullable Settings indexSettings, DocumentMapperParser docMapperParser, DocumentMapper docMapper, - SourceToParse source, XContentParser parser) { + public InternalParseContext(IndexSettings indexSettings, DocumentMapperParser docMapperParser, DocumentMapper docMapper, + SourceToParse source, XContentParser parser) { this.indexSettings = indexSettings; this.docMapper = docMapper; this.docMapperParser = docMapperParser; @@ -361,8 +359,7 @@ public DocumentMapperParser docMapperParser() { } @Override - @Nullable - public Settings indexSettings() { + public IndexSettings indexSettings() { return this.indexSettings; } @@ -583,8 +580,7 @@ public boolean isWithinMultiFields() { return false; } - @Nullable - public abstract Settings indexSettings(); + public abstract IndexSettings indexSettings(); public abstract SourceToParse sourceToParse(); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java b/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java index 11804c2e88e1d..a01a9afc38375 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java @@ -85,6 +85,17 @@ public void updateSeqID(long sequenceNumber, long primaryTerm) { this.seqID.primaryTerm.setLongValue(primaryTerm); } + /** + * Makes the processing document as a tombstone document rather than a regular document. + * Tombstone documents are stored in Lucene index to represent delete operations or Noops. 
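 *
 * An illustrative sketch of marking a freshly parsed document, assuming {@code parsedDoc} is a
 * {@link ParsedDocument} holding exactly one Lucene document:
 * <pre>
 * ParsedDocument tombstone = parsedDoc.toTombstone();
 * assert tombstone.docs().size() == 1;   // tombstones always consist of a single document
 * // the root document now carries the _tombstone numeric doc value set to 1
 * </pre>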
+ */ + ParsedDocument toTombstone() { + assert docs().size() == 1 : "Tombstone should have a single doc [" + docs() + "]"; + this.seqID.tombstoneField.setLongValue(1); + rootDoc().add(this.seqID.tombstoneField); + return this; + } + public String routing() { return this.routing; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java index ea15991aa942f..981138f60673a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java @@ -69,26 +69,29 @@ public static class SequenceIDFields { public final Field seqNo; public final Field seqNoDocValue; public final Field primaryTerm; + public final Field tombstoneField; - public SequenceIDFields(Field seqNo, Field seqNoDocValue, Field primaryTerm) { + public SequenceIDFields(Field seqNo, Field seqNoDocValue, Field primaryTerm, Field tombstoneField) { Objects.requireNonNull(seqNo, "sequence number field cannot be null"); Objects.requireNonNull(seqNoDocValue, "sequence number dv field cannot be null"); Objects.requireNonNull(primaryTerm, "primary term field cannot be null"); this.seqNo = seqNo; this.seqNoDocValue = seqNoDocValue; this.primaryTerm = primaryTerm; + this.tombstoneField = tombstoneField; } public static SequenceIDFields emptySeqID() { return new SequenceIDFields(new LongPoint(NAME, SequenceNumbers.UNASSIGNED_SEQ_NO), new NumericDocValuesField(NAME, SequenceNumbers.UNASSIGNED_SEQ_NO), - new NumericDocValuesField(PRIMARY_TERM_NAME, 0)); + new NumericDocValuesField(PRIMARY_TERM_NAME, 0), new NumericDocValuesField(TOMBSTONE_NAME, 0)); } } public static final String NAME = "_seq_no"; public static final String CONTENT_TYPE = "_seq_no"; public static final String PRIMARY_TERM_NAME = "_primary_term"; + public static final String TOMBSTONE_NAME = "_tombstone"; public static class SeqNoDefaults { public static final String NAME = SeqNoFieldMapper.NAME; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java index cc360f0cefb10..479baab89c753 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.StoredField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; @@ -50,6 +51,7 @@ public class SourceFieldMapper extends MetadataFieldMapper { public static final String NAME = "_source"; + public static final String RECOVERY_SOURCE_NAME = "_recovery_source"; public static final String CONTENT_TYPE = "_source"; private final Function, Map> filter; @@ -228,7 +230,8 @@ public Mapper parse(ParseContext context) throws IOException { @Override protected void parseCreateField(ParseContext context, List fields) throws IOException { - BytesReference source = context.sourceToParse().source(); + BytesReference originalSource = context.sourceToParse().source(); + BytesReference source = originalSource; if (enabled && fieldType().stored() && source != null) { // Percolate and tv APIs may not set the source and that is ok, because these APIs will not index any data if (filter != null) { @@ -244,8 +247,17 @@ protected void 
parseCreateField(ParseContext context, List field } BytesRef ref = source.toBytesRef(); fields.add(new StoredField(fieldType().name(), ref.bytes, ref.offset, ref.length)); + } else { + source = null; } - } + + if (originalSource != null && source != originalSource && context.indexSettings().isSoftDeleteEnabled()) { + // if we omitted source or modified it we add the _recovery_source to ensure we have it for ops based recovery + BytesRef ref = originalSource.toBytesRef(); + fields.add(new StoredField(RECOVERY_SOURCE_NAME, ref.bytes, ref.offset, ref.length)); + fields.add(new NumericDocValuesField(RECOVERY_SOURCE_NAME, 1)); + } + } @Override protected String contentType() { diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 6db5c21f56626..826cc459e16a3 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -93,12 +93,14 @@ import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.get.GetStats; import org.elasticsearch.index.get.ShardGetService; +import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperForType; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.RootObjectMapper; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.mapper.UidFieldMapper; @@ -1659,25 +1661,33 @@ public void onSettingsChanged() { } /** - * Acquires a lock on the translog files, preventing them from being trimmed. + * Acquires a lock on the translog files and Lucene soft-deleted documents to prevent them from being trimmed */ - public Closeable acquireTranslogRetentionLock() { - return getEngine().acquireTranslogRetentionLock(); + public Closeable acquireRetentionLockForPeerRecovery() { + return getEngine().acquireRetentionLockForPeerRecovery(); } /** - * Creates a new translog snapshot for reading translog operations whose seq# at least the provided seq#. - * The caller has to close the returned snapshot after finishing the reading. + * Returns the estimated number of history operations whose seq# at least the provided seq# in this shard. */ - public Translog.Snapshot newTranslogSnapshotFromMinSeqNo(long minSeqNo) throws IOException { - return getEngine().newTranslogSnapshotFromMinSeqNo(minSeqNo); + public int estimateNumberOfHistoryOperations(String source, long startingSeqNo) throws IOException { + return getEngine().estimateNumberOfHistoryOperations(source, mapperService, startingSeqNo); } /** - * Returns the estimated number of operations in translog whose seq# at least the provided seq#. + * Creates a new history snapshot for reading operations since the provided starting seqno (inclusive). + * The returned snapshot can be retrieved from either Lucene index or translog files. 
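 *
 * A rough usage sketch, assuming a caller that follows the same pattern peer recovery uses later in
 * this change (acquire the retention lock first, then verify the history is complete) for some
 * {@code startingSeqNo}:
 * <pre>
 * try (Closeable retentionLock = shard.acquireRetentionLockForPeerRecovery()) {
 *     if (shard.hasCompleteHistoryOperations("peer-recovery", startingSeqNo)) {
 *         try (Translog.Snapshot snapshot = shard.getHistoryOperations("peer-recovery", startingSeqNo)) {
 *             Translog.Operation op;
 *             while ((op = snapshot.next()) != null) {
 *                 // replay or ship the operation
 *             }
 *         }
 *     }
 * }
 * </pre>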
*/ - public int estimateTranslogOperationsFromMinSeq(long minSeqNo) { - return getEngine().estimateTranslogOperationsFromMinSeq(minSeqNo); + public Translog.Snapshot getHistoryOperations(String source, long startingSeqNo) throws IOException { + return getEngine().readHistoryOperations(source, mapperService, startingSeqNo); + } + + /** + * Checks if we have a completed history of operations since the given starting seqno (inclusive). + * This method should be called after acquiring the retention lock; See {@link #acquireRetentionLockForPeerRecovery()} + */ + public boolean hasCompleteHistoryOperations(String source, long startingSeqNo) throws IOException { + return getEngine().hasCompleteOperationHistory(source, mapperService, startingSeqNo); } public List segments(boolean verbose) { @@ -2240,7 +2250,7 @@ private EngineConfig newEngineConfig() { IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()), Collections.singletonList(refreshListeners), Collections.singletonList(new RefreshMetricUpdater(refreshMetric)), - indexSort, this::runTranslogRecovery, circuitBreakerService, replicationTracker, () -> operationPrimaryTerm); + indexSort, this::runTranslogRecovery, circuitBreakerService, replicationTracker, () -> operationPrimaryTerm, tombstoneDocSupplier()); } /** @@ -2614,4 +2624,19 @@ public void afterRefresh(boolean didRefresh) throws IOException { refreshMetric.inc(System.nanoTime() - currentRefreshStartTime); } } + + private EngineConfig.TombstoneDocSupplier tombstoneDocSupplier() { + final RootObjectMapper.Builder noopRootMapper = new RootObjectMapper.Builder("__noop"); + final DocumentMapper noopDocumentMapper = new DocumentMapper.Builder(noopRootMapper, mapperService).build(mapperService); + return new EngineConfig.TombstoneDocSupplier() { + @Override + public ParsedDocument newDeleteTombstoneDoc(String type, String id) { + return docMapper(type).getDocumentMapper().createDeleteTombstoneDoc(shardId.getIndexName(), type, id); + } + @Override + public ParsedDocument newNoopTombstoneDoc(String reason) { + return noopDocumentMapper.createNoopTombstoneDoc(shardId.getIndexName(), reason); + } + }; + } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java index 1edc0eb5dcafe..016a8afff6964 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java +++ b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java @@ -89,7 +89,7 @@ public void resync(final IndexShard indexShard, final ActionListener // Wrap translog snapshot to make it synchronized as it is accessed by different threads through SnapshotSender. 
// Even though those calls are not concurrent, snapshot.next() uses non-synchronized state and is not multi-thread-compatible // Also fail the resync early if the shard is shutting down - snapshot = indexShard.newTranslogSnapshotFromMinSeqNo(startingSeqNo); + snapshot = indexShard.getHistoryOperations("resync", startingSeqNo); final Translog.Snapshot originalSnapshot = snapshot; final Translog.Snapshot wrappedSnapshot = new Translog.Snapshot() { @Override diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index 9b2de6b6374f6..5e5ea840d4b81 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -156,6 +156,7 @@ void addIndices(final RecoveryState.Index indexRecoveryStats, final Directory ta final Directory hardLinkOrCopyTarget = new org.apache.lucene.store.HardlinkCopyDirectoryWrapper(target); IndexWriterConfig iwc = new IndexWriterConfig(null) + .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) .setCommitOnClose(false) // we don't want merges to happen here - we call maybe merge on the engine // later once we stared it up otherwise we would need to wait for it here diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index fa1534ef922c7..9024d1ef37835 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -998,7 +998,6 @@ public RecoveryDiff recoveryDiff(MetadataSnapshot recoveryTargetSnapshot) { } final String segmentId = IndexFileNames.parseSegmentName(meta.name()); final String extension = IndexFileNames.getExtension(meta.name()); - assert FIELD_INFOS_FILE_EXTENSION.equals(extension) == false || IndexFileNames.stripExtension(IndexFileNames.stripSegmentName(meta.name())).isEmpty() : "FieldInfos are generational but updateable DV are not supported in elasticsearch"; if (IndexFileNames.SEGMENTS.equals(segmentId) || DEL_FILE_EXTENSION.equals(extension) || LIV_FILE_EXTENSION.equals(extension)) { // only treat del files as per-commit files fnm files are generational but only for upgradable DV perCommitStoreFiles.add(meta); @@ -1612,6 +1611,7 @@ private static IndexWriter newIndexWriter(final IndexWriterConfig.OpenMode openM throws IOException { assert openMode == IndexWriterConfig.OpenMode.APPEND || commit == null : "can't specify create flag with a commit"; IndexWriterConfig iwc = new IndexWriterConfig(null) + .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) .setCommitOnClose(false) .setIndexCommit(commit) // we don't want merges to happen here - we call maybe merge on the engine diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index 73e3acb53cfcc..1086ba8f35029 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -1291,6 +1291,8 @@ public String toString() { ", type='" + type + '\'' + ", seqNo=" + seqNo + ", primaryTerm=" + primaryTerm + + ", version=" + version + + ", autoGeneratedIdTimestamp=" + autoGeneratedIdTimestamp + '}'; } @@ -1455,6 +1457,7 @@ public String toString() { "uid=" + uid + ", seqNo=" + seqNo + ", primaryTerm=" + primaryTerm + + ", version=" + version + '}'; } } diff --git 
a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java index 86995ae7c5a99..a90f8af0af42c 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java @@ -32,6 +32,7 @@ import org.apache.lucene.store.Lock; import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.store.NativeFSLockFactory; +import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cli.EnvironmentAwareCommand; @@ -177,6 +178,7 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th terminal.println("Marking index with the new history uuid"); // commit the new histroy id IndexWriterConfig iwc = new IndexWriterConfig(null) + .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) .setCommitOnClose(false) // we don't want merges to happen here - we call maybe merge on the engine // later once we stared it up otherwise we would need to wait for it here diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 352f07d57649d..10f796e5e1551 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -146,11 +146,11 @@ public RecoveryResponse recoverToTarget() throws IOException { assert targetShardRouting.initializing() : "expected recovery target to be initializing but was " + targetShardRouting; }, shardId + " validating recovery target ["+ request.targetAllocationId() + "] registered ", shard, cancellableThreads, logger); - try (Closeable ignored = shard.acquireTranslogRetentionLock()) { + try (Closeable ignored = shard.acquireRetentionLockForPeerRecovery()) { final long startingSeqNo; final long requiredSeqNoRangeStart; final boolean isSequenceNumberBasedRecovery = request.startingSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && - isTargetSameHistory() && isTranslogReadyForSequenceNumberBasedRecovery(); + isTargetSameHistory() && shard.hasCompleteHistoryOperations("peer-recovery", request.startingSeqNo()); if (isSequenceNumberBasedRecovery) { logger.trace("performing sequence numbers based recovery. starting at [{}]", request.startingSeqNo()); startingSeqNo = request.startingSeqNo(); @@ -162,14 +162,16 @@ public RecoveryResponse recoverToTarget() throws IOException { } catch (final Exception e) { throw new RecoveryEngineException(shard.shardId(), 1, "snapshot failed", e); } - // we set this to 0 to create a translog roughly according to the retention policy - // on the target. Note that it will still filter out legacy operations with no sequence numbers - startingSeqNo = 0; - // but we must have everything above the local checkpoint in the commit + // We must have everything above the local checkpoint in the commit requiredSeqNoRangeStart = Long.parseLong(phase1Snapshot.getIndexCommit().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)) + 1; + // If soft-deletes enabled, we need to transfer only operations after the local_checkpoint of the commit to have + // the same history on the target. 
However, with translog, we need to set this to 0 to create a translog roughly + // according to the retention policy on the target. Note that it will still filter out legacy operations without seqNo. + startingSeqNo = shard.indexSettings().isSoftDeleteEnabled() ? requiredSeqNoRangeStart : 0; try { - phase1(phase1Snapshot.getIndexCommit(), () -> shard.estimateTranslogOperationsFromMinSeq(startingSeqNo)); + final int estimateNumOps = shard.estimateNumberOfHistoryOperations("peer-recovery", startingSeqNo); + phase1(phase1Snapshot.getIndexCommit(), () -> estimateNumOps); } catch (final Exception e) { throw new RecoveryEngineException(shard.shardId(), 1, "phase1 failed", e); } finally { @@ -186,7 +188,8 @@ public RecoveryResponse recoverToTarget() throws IOException { try { // For a sequence based recovery, the target can keep its local translog - prepareTargetForTranslog(isSequenceNumberBasedRecovery == false, shard.estimateTranslogOperationsFromMinSeq(startingSeqNo)); + prepareTargetForTranslog(isSequenceNumberBasedRecovery == false, + shard.estimateNumberOfHistoryOperations("peer-recovery", startingSeqNo)); } catch (final Exception e) { throw new RecoveryEngineException(shard.shardId(), 1, "prepare target for translog failed", e); } @@ -207,11 +210,13 @@ public RecoveryResponse recoverToTarget() throws IOException { */ cancellableThreads.execute(() -> shard.waitForOpsToComplete(endingSeqNo)); - logger.trace("all operations up to [{}] completed, which will be used as an ending sequence number", endingSeqNo); - - logger.trace("snapshot translog for recovery; current size is [{}]", shard.estimateTranslogOperationsFromMinSeq(startingSeqNo)); + if (logger.isTraceEnabled()) { + logger.trace("all operations up to [{}] completed, which will be used as an ending sequence number", endingSeqNo); + logger.trace("snapshot translog for recovery; current size is [{}]", + shard.estimateNumberOfHistoryOperations("peer-recovery", startingSeqNo)); + } final long targetLocalCheckpoint; - try(Translog.Snapshot snapshot = shard.newTranslogSnapshotFromMinSeqNo(startingSeqNo)) { + try (Translog.Snapshot snapshot = shard.getHistoryOperations("peer-recovery", startingSeqNo)) { targetLocalCheckpoint = phase2(startingSeqNo, requiredSeqNoRangeStart, endingSeqNo, snapshot); } catch (Exception e) { throw new RecoveryEngineException(shard.shardId(), 2, "phase2 failed", e); @@ -268,36 +273,6 @@ public void onFailure(Exception e) { }); } - /** - * Determines if the source translog is ready for a sequence-number-based peer recovery. The main condition here is that the source - * translog contains all operations above the local checkpoint on the target. We already know the that translog contains or will contain - * all ops above the source local checkpoint, so we can stop check there. 
- * - * @return {@code true} if the source is ready for a sequence-number-based recovery - * @throws IOException if an I/O exception occurred reading the translog snapshot - */ - boolean isTranslogReadyForSequenceNumberBasedRecovery() throws IOException { - final long startingSeqNo = request.startingSeqNo(); - assert startingSeqNo >= 0; - final long localCheckpoint = shard.getLocalCheckpoint(); - logger.trace("testing sequence numbers in range: [{}, {}]", startingSeqNo, localCheckpoint); - // the start recovery request is initialized with the starting sequence number set to the target shard's local checkpoint plus one - if (startingSeqNo - 1 <= localCheckpoint) { - final LocalCheckpointTracker tracker = new LocalCheckpointTracker(startingSeqNo, startingSeqNo - 1); - try (Translog.Snapshot snapshot = shard.newTranslogSnapshotFromMinSeqNo(startingSeqNo)) { - Translog.Operation operation; - while ((operation = snapshot.next()) != null) { - if (operation.seqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { - tracker.markSeqNoAsCompleted(operation.seqNo()); - } - } - } - return tracker.getCheckpoint() >= localCheckpoint; - } else { - return false; - } - } - /** * Perform phase1 of the recovery operations. Once this {@link IndexCommit} * snapshot has been performed no commit operations (files being fsync'd) diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 27f75d51a1444..cdf89f37598cf 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -1495,6 +1495,7 @@ public void restore() throws IOException { // empty shard would cause exceptions to be thrown. Since there is no data to restore from an empty // shard anyway, we just create the empty shard here and then exit. 
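// as in StoreRecovery, Store#newIndexWriter and TruncateTranslogCommand earlier in this change, the
// writer config below also registers Lucene.SOFT_DELETES_FIELD, presumably so that even this empty
// commit is written with the soft-deletes field configured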
IndexWriter writer = new IndexWriter(store.directory(), new IndexWriterConfig(null) + .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) .setOpenMode(IndexWriterConfig.OpenMode.CREATE) .setCommitOnClose(true)); writer.close(); diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 702d63d0d9401..6acdbad2ccec9 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -64,6 +64,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.IndexId; @@ -120,7 +121,8 @@ public class RestoreService extends AbstractComponent implements ClusterStateApp SETTING_NUMBER_OF_SHARDS, SETTING_VERSION_CREATED, SETTING_INDEX_UUID, - SETTING_CREATION_DATE)); + SETTING_CREATION_DATE, + IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey())); // It's OK to change some settings, but we shouldn't allow simply removing them private static final Set UNREMOVABLE_SETTINGS; diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java index 90173455c3be3..9786c0eaf5290 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -392,6 +392,7 @@ public void testPrimaryReplicaResyncFailed() throws Exception { assertThat(shard.getLocalCheckpoint(), equalTo(numDocs + moreDocs)); } }, 30, TimeUnit.SECONDS); + internalCluster().assertConsistentHistoryBetweenTranslogAndLuceneIndex(); } } diff --git a/server/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java b/server/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java index 753aedea01e02..890f6ef163b33 100644 --- a/server/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java +++ b/server/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java @@ -33,18 +33,23 @@ import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.index.SoftDeletesRetentionMergePolicy; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.Weight; import org.apache.lucene.store.Directory; import org.apache.lucene.store.MMapDirectory; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.Bits; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import java.io.StringReader; import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; @@ -53,6 +58,8 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; +import static org.hamcrest.Matchers.equalTo; + public class LuceneTests extends ESTestCase { public void testWaitForIndex() throws Exception { final MockDirectoryWrapper dir = 
newMockDirectory(); @@ -406,4 +413,88 @@ public void testMMapHackSupported() throws Exception { // add assume's here if needed for certain platforms, but we should know if it does not work. assertTrue("MMapDirectory does not support unmapping: " + MMapDirectory.UNMAP_NOT_SUPPORTED_REASON, MMapDirectory.UNMAP_SUPPORTED); } + + public void testWrapAllDocsLive() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig config = newIndexWriterConfig().setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) + .setMergePolicy(new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD, MatchAllDocsQuery::new, newMergePolicy())); + IndexWriter writer = new IndexWriter(dir, config); + int numDocs = between(1, 10); + Set liveDocs = new HashSet<>(); + for (int i = 0; i < numDocs; i++) { + String id = Integer.toString(i); + Document doc = new Document(); + doc.add(new StringField("id", id, Store.YES)); + writer.addDocument(doc); + liveDocs.add(id); + } + for (int i = 0; i < numDocs; i++) { + if (randomBoolean()) { + String id = Integer.toString(i); + Document doc = new Document(); + doc.add(new StringField("id", "v2-" + id, Store.YES)); + if (randomBoolean()) { + doc.add(Lucene.newSoftDeletesField()); + } + writer.softUpdateDocument(new Term("id", id), doc, Lucene.newSoftDeletesField()); + liveDocs.add("v2-" + id); + } + } + try (DirectoryReader unwrapped = DirectoryReader.open(writer)) { + DirectoryReader reader = Lucene.wrapAllDocsLive(unwrapped); + assertThat(reader.numDocs(), equalTo(liveDocs.size())); + IndexSearcher searcher = new IndexSearcher(reader); + Set actualDocs = new HashSet<>(); + TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), Integer.MAX_VALUE); + for (ScoreDoc scoreDoc : topDocs.scoreDocs) { + actualDocs.add(reader.document(scoreDoc.doc).get("id")); + } + assertThat(actualDocs, equalTo(liveDocs)); + } + IOUtils.close(writer, dir); + } + + public void testWrapLiveDocsNotExposeAbortedDocuments() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig config = newIndexWriterConfig().setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) + .setMergePolicy(new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD, MatchAllDocsQuery::new, newMergePolicy())); + IndexWriter writer = new IndexWriter(dir, config); + int numDocs = between(1, 10); + List liveDocs = new ArrayList<>(); + for (int i = 0; i < numDocs; i++) { + String id = Integer.toString(i); + Document doc = new Document(); + doc.add(new StringField("id", id, Store.YES)); + if (randomBoolean()) { + doc.add(Lucene.newSoftDeletesField()); + } + writer.addDocument(doc); + liveDocs.add(id); + } + int abortedDocs = between(1, 10); + for (int i = 0; i < abortedDocs; i++) { + try { + Document doc = new Document(); + doc.add(new StringField("id", "aborted-" + i, Store.YES)); + StringReader reader = new StringReader(""); + doc.add(new TextField("other", reader)); + reader.close(); // mark the indexing hit non-aborting error + writer.addDocument(doc); + fail("index should have failed"); + } catch (Exception ignored) { } + } + try (DirectoryReader unwrapped = DirectoryReader.open(writer)) { + DirectoryReader reader = Lucene.wrapAllDocsLive(unwrapped); + assertThat(reader.maxDoc(), equalTo(numDocs + abortedDocs)); + assertThat(reader.numDocs(), equalTo(liveDocs.size())); + IndexSearcher searcher = new IndexSearcher(reader); + List actualDocs = new ArrayList<>(); + TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), Integer.MAX_VALUE); + for (ScoreDoc scoreDoc : topDocs.scoreDocs) { + 
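// collect the stored ids the wrapped reader still exposes; the aborted documents added above
// must not appear among them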
actualDocs.add(reader.document(scoreDoc.doc).get("id")); + } + assertThat(actualDocs, equalTo(liveDocs)); + } + IOUtils.close(writer, dir); + } } diff --git a/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java b/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java index 6bdd8ea3f2e07..ac2f2b0d4f32e 100644 --- a/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java +++ b/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java @@ -109,6 +109,7 @@ public void setDisruptionScheme(ServiceDisruptionScheme scheme) { protected void beforeIndexDeletion() throws Exception { if (disableBeforeIndexDeletion == false) { super.beforeIndexDeletion(); + internalCluster().assertConsistentHistoryBetweenTranslogAndLuceneIndex(); assertSeqNos(); } } diff --git a/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java b/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java index 154d702e7fb77..84cc1390e907f 100644 --- a/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java @@ -39,6 +39,7 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardPath; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -396,7 +397,8 @@ public void testReuseInFileBasedPeerRecovery() throws Exception { .get(); logger.info("--> indexing docs"); - for (int i = 0; i < randomIntBetween(1, 1024); i++) { + int numDocs = randomIntBetween(1, 1024); + for (int i = 0; i < numDocs; i++) { client(primaryNode).prepareIndex("test", "type").setSource("field", "value").execute().actionGet(); } @@ -418,12 +420,15 @@ public void testReuseInFileBasedPeerRecovery() throws Exception { } logger.info("--> restart replica node"); + boolean softDeleteEnabled = internalCluster().getInstance(IndicesService.class, primaryNode) + .indexServiceSafe(resolveIndex("test")).getShard(0).indexSettings().isSoftDeleteEnabled(); + int moreDocs = randomIntBetween(1, 1024); internalCluster().restartNode(replicaNode, new RestartCallback() { @Override public Settings onNodeStopped(String nodeName) throws Exception { // index some more documents; we expect to reuse the files that already exist on the replica - for (int i = 0; i < randomIntBetween(1, 1024); i++) { + for (int i = 0; i < moreDocs; i++) { client(primaryNode).prepareIndex("test", "type").setSource("field", "value").execute().actionGet(); } @@ -431,8 +436,12 @@ public Settings onNodeStopped(String nodeName) throws Exception { client(primaryNode).admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() .put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), "-1") .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), "-1") + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0) ).get(); client(primaryNode).admin().indices().prepareFlush("test").setForce(true).get(); + if (softDeleteEnabled) { // We need an extra flush to advance the min_retained_seqno of the SoftDeletesPolicy + client(primaryNode).admin().indices().prepareFlush("test").setForce(true).get(); + } return super.onNodeStopped(nodeName); } }); diff --git 
a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java index 50719a1c3fed0..18fd9b08a5101 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -289,7 +290,7 @@ public void testAsyncTranslogTrimActuallyWorks() throws Exception { .put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), -1)) .get(); IndexShard shard = indexService.getShard(0); - assertBusy(() -> assertThat(shard.estimateTranslogOperationsFromMinSeq(0L), equalTo(0))); + assertBusy(() -> assertThat(IndexShardTestCase.getTranslog(shard).totalOperations(), equalTo(0))); } public void testIllegalFsyncInterval() { diff --git a/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java b/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java index 08f517552b098..43944759f00d8 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java @@ -607,4 +607,12 @@ public void testQueryDefaultField() { ); assertThat(index.getDefaultFields(), equalTo(Arrays.asList("body", "title"))); } + + public void testUpdateSoftDeletesFails() { + IndexScopedSettings settings = new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS); + IllegalArgumentException error = expectThrows(IllegalArgumentException.class, () -> + settings.updateSettings(Settings.builder().put("index.soft_deletes.enabled", randomBoolean()).build(), + Settings.builder(), Settings.builder(), "index")); + assertThat(error.getMessage(), equalTo("final index setting [index.soft_deletes.enabled], not updateable")); + } } diff --git a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java index 5ea55183c610e..d74b9b41a8867 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java @@ -52,20 +52,24 @@ public class CombinedDeletionPolicyTests extends ESTestCase { public void testKeepCommitsAfterGlobalCheckpoint() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(); + final int extraRetainedOps = between(0, 100); + final SoftDeletesPolicy softDeletesPolicy = new SoftDeletesPolicy(globalCheckpoint::get, -1, extraRetainedOps); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, softDeletesPolicy, globalCheckpoint::get); final LongArrayList maxSeqNoList = new LongArrayList(); final LongArrayList translogGenList = new LongArrayList(); final List commitList = new ArrayList<>(); int totalCommits = between(2, 20); long lastMaxSeqNo = 0; + long lastCheckpoint = lastMaxSeqNo; long lastTranslogGen = 0; final UUID 
translogUUID = UUID.randomUUID(); for (int i = 0; i < totalCommits; i++) { lastMaxSeqNo += between(1, 10000); + lastCheckpoint = randomLongBetween(lastCheckpoint, lastMaxSeqNo); lastTranslogGen += between(1, 100); - commitList.add(mockIndexCommit(lastMaxSeqNo, translogUUID, lastTranslogGen)); + commitList.add(mockIndexCommit(lastCheckpoint, lastMaxSeqNo, translogUUID, lastTranslogGen)); maxSeqNoList.add(lastMaxSeqNo); translogGenList.add(lastTranslogGen); } @@ -86,14 +90,19 @@ public void testKeepCommitsAfterGlobalCheckpoint() throws Exception { } assertThat(translogPolicy.getMinTranslogGenerationForRecovery(), equalTo(translogGenList.get(keptIndex))); assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), equalTo(lastTranslogGen)); + assertThat(softDeletesPolicy.getMinRetainedSeqNo(), + equalTo(Math.min(getLocalCheckpoint(commitList.get(keptIndex)) + 1, globalCheckpoint.get() + 1 - extraRetainedOps))); } public void testAcquireIndexCommit() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(); + final int extraRetainedOps = between(0, 100); + final SoftDeletesPolicy softDeletesPolicy = new SoftDeletesPolicy(globalCheckpoint::get, -1, extraRetainedOps); final UUID translogUUID = UUID.randomUUID(); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, softDeletesPolicy, globalCheckpoint::get); long lastMaxSeqNo = between(1, 1000); + long lastCheckpoint = randomLongBetween(-1, lastMaxSeqNo); long lastTranslogGen = between(1, 20); int safeIndex = 0; List commitList = new ArrayList<>(); @@ -103,8 +112,9 @@ public void testAcquireIndexCommit() throws Exception { int newCommits = between(1, 10); for (int n = 0; n < newCommits; n++) { lastMaxSeqNo += between(1, 1000); + lastCheckpoint = randomLongBetween(lastCheckpoint, lastMaxSeqNo); lastTranslogGen += between(1, 20); - commitList.add(mockIndexCommit(lastMaxSeqNo, translogUUID, lastTranslogGen)); + commitList.add(mockIndexCommit(lastCheckpoint, lastMaxSeqNo, translogUUID, lastTranslogGen)); } // Advance the global checkpoint to between [safeIndex, safeIndex + 1) safeIndex = randomIntBetween(safeIndex, commitList.size() - 1); @@ -115,6 +125,9 @@ public void testAcquireIndexCommit() throws Exception { globalCheckpoint.set(randomLongBetween(lower, upper)); commitList.forEach(this::resetDeletion); indexPolicy.onCommit(commitList); + IndexCommit safeCommit = CombinedDeletionPolicy.findSafeCommitPoint(commitList, globalCheckpoint.get()); + assertThat(softDeletesPolicy.getMinRetainedSeqNo(), + equalTo(Math.min(getLocalCheckpoint(safeCommit) + 1, globalCheckpoint.get() + 1 - extraRetainedOps))); // Captures and releases some commits int captures = between(0, 5); for (int n = 0; n < captures; n++) { @@ -133,7 +146,7 @@ public void testAcquireIndexCommit() throws Exception { snapshottingCommits.remove(snapshot); final long pendingSnapshots = snapshottingCommits.stream().filter(snapshot::equals).count(); final IndexCommit lastCommit = commitList.get(commitList.size() - 1); - final IndexCommit safeCommit = CombinedDeletionPolicy.findSafeCommitPoint(commitList, globalCheckpoint.get()); + safeCommit = CombinedDeletionPolicy.findSafeCommitPoint(commitList, globalCheckpoint.get()); assertThat(indexPolicy.releaseCommit(snapshot), equalTo(pendingSnapshots == 0 && snapshot.equals(lastCommit) == false && 
snapshot.equals(safeCommit) == false)); } @@ -144,6 +157,8 @@ public void testAcquireIndexCommit() throws Exception { equalTo(Long.parseLong(commitList.get(safeIndex).getUserData().get(Translog.TRANSLOG_GENERATION_KEY)))); assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), equalTo(Long.parseLong(commitList.get(commitList.size() - 1).getUserData().get(Translog.TRANSLOG_GENERATION_KEY)))); + assertThat(softDeletesPolicy.getMinRetainedSeqNo(), + equalTo(Math.min(getLocalCheckpoint(commitList.get(safeIndex)) + 1, globalCheckpoint.get() + 1 - extraRetainedOps))); } snapshottingCommits.forEach(indexPolicy::releaseCommit); globalCheckpoint.set(randomLongBetween(lastMaxSeqNo, Long.MAX_VALUE)); @@ -155,25 +170,27 @@ public void testAcquireIndexCommit() throws Exception { assertThat(commitList.get(commitList.size() - 1).isDeleted(), equalTo(false)); assertThat(translogPolicy.getMinTranslogGenerationForRecovery(), equalTo(lastTranslogGen)); assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), equalTo(lastTranslogGen)); + IndexCommit safeCommit = CombinedDeletionPolicy.findSafeCommitPoint(commitList, globalCheckpoint.get()); + assertThat(softDeletesPolicy.getMinRetainedSeqNo(), + equalTo(Math.min(getLocalCheckpoint(safeCommit) + 1, globalCheckpoint.get() + 1 - extraRetainedOps))); } public void testLegacyIndex() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(); + final SoftDeletesPolicy softDeletesPolicy = new SoftDeletesPolicy(globalCheckpoint::get, -1, 0); final UUID translogUUID = UUID.randomUUID(); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, softDeletesPolicy, globalCheckpoint::get); long legacyTranslogGen = randomNonNegativeLong(); IndexCommit legacyCommit = mockLegacyIndexCommit(translogUUID, legacyTranslogGen); - indexPolicy.onCommit(singletonList(legacyCommit)); - verify(legacyCommit, never()).delete(); - assertThat(translogPolicy.getMinTranslogGenerationForRecovery(), equalTo(legacyTranslogGen)); - assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), equalTo(legacyTranslogGen)); + assertThat(CombinedDeletionPolicy.findSafeCommitPoint(singletonList(legacyCommit), globalCheckpoint.get()), + equalTo(legacyCommit)); long safeTranslogGen = randomLongBetween(legacyTranslogGen, Long.MAX_VALUE); long maxSeqNo = randomLongBetween(1, Long.MAX_VALUE); - final IndexCommit freshCommit = mockIndexCommit(maxSeqNo, translogUUID, safeTranslogGen); + final IndexCommit freshCommit = mockIndexCommit(randomLongBetween(-1, maxSeqNo), maxSeqNo, translogUUID, safeTranslogGen); globalCheckpoint.set(randomLongBetween(0, maxSeqNo - 1)); indexPolicy.onCommit(Arrays.asList(legacyCommit, freshCommit)); @@ -190,20 +207,23 @@ public void testLegacyIndex() throws Exception { verify(freshCommit, times(0)).delete(); assertThat(translogPolicy.getMinTranslogGenerationForRecovery(), equalTo(safeTranslogGen)); assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), equalTo(safeTranslogGen)); + assertThat(softDeletesPolicy.getMinRetainedSeqNo(), equalTo(getLocalCheckpoint(freshCommit) + 1)); } public void testKeepSingleNoOpsCommits() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(randomLong()); final UUID translogUUID = UUID.randomUUID(); + final SoftDeletesPolicy softDeletesPolicy = new 
SoftDeletesPolicy(globalCheckpoint::get, -1, 0); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, softDeletesPolicy, globalCheckpoint::get); final List commitList = new ArrayList<>(); final int numOfNoOpsCommits = between(1, 10); long lastNoopTranslogGen = 0; for (int i = 0; i < numOfNoOpsCommits; i++) { lastNoopTranslogGen += between(1, 20); - commitList.add(mockIndexCommit(SequenceNumbers.NO_OPS_PERFORMED, translogUUID, lastNoopTranslogGen)); + commitList.add( + mockIndexCommit(SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.NO_OPS_PERFORMED, translogUUID, lastNoopTranslogGen)); } // Keep only one no_ops commit. indexPolicy.onCommit(commitList); @@ -220,7 +240,7 @@ public void testKeepSingleNoOpsCommits() throws Exception { for (int i = 0; i < numOfGoodCommits; i++) { maxSeqNo += between(1, 1000); lastTranslogGen += between(1, 20); - commitList.add(mockIndexCommit(maxSeqNo, translogUUID, lastTranslogGen)); + commitList.add(mockIndexCommit(maxSeqNo, maxSeqNo, translogUUID, lastTranslogGen)); } // If the global checkpoint is still unassigned, we should still keep one NO_OPS_PERFORMED commit. globalCheckpoint.set(SequenceNumbers.UNASSIGNED_SEQ_NO); @@ -244,21 +264,27 @@ public void testKeepSingleNoOpsCommits() throws Exception { public void testDeleteInvalidCommits() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(randomNonNegativeLong()); + final SoftDeletesPolicy softDeletesPolicy = new SoftDeletesPolicy(globalCheckpoint::get, -1, 0); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, softDeletesPolicy, globalCheckpoint::get); final int invalidCommits = between(1, 10); final List commitList = new ArrayList<>(); for (int i = 0; i < invalidCommits; i++) { - commitList.add(mockIndexCommit(randomNonNegativeLong(), UUID.randomUUID(), randomNonNegativeLong())); + long maxSeqNo = randomNonNegativeLong(); + commitList.add(mockIndexCommit(randomLongBetween(-1, maxSeqNo), maxSeqNo, UUID.randomUUID(), randomNonNegativeLong())); } final UUID expectedTranslogUUID = UUID.randomUUID(); long lastTranslogGen = 0; final int validCommits = between(1, 10); + long lastMaxSeqNo = between(1, 1000); + long lastCheckpoint = randomLongBetween(-1, lastMaxSeqNo); for (int i = 0; i < validCommits; i++) { lastTranslogGen += between(1, 1000); - commitList.add(mockIndexCommit(randomNonNegativeLong(), expectedTranslogUUID, lastTranslogGen)); + lastMaxSeqNo += between(1, 1000); + lastCheckpoint = randomLongBetween(lastCheckpoint, lastMaxSeqNo); + commitList.add(mockIndexCommit(lastCheckpoint, lastMaxSeqNo, expectedTranslogUUID, lastTranslogGen)); } // We should never keep invalid commits regardless of the value of the global checkpoint. 
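// The retention assertions in these tests all reduce to a single rule; a minimal sketch of the expected
// value, with names mirroring the test variables rather than the production SoftDeletesPolicy:
private static long expectedMinRetainedSeqNo(long safeCommitLocalCheckpoint, long globalCheckpoint, long extraRetainedOps) {
    // retain everything above the safe commit's local checkpoint, but never fewer than
    // extraRetainedOps operations below the current global checkpoint
    return Math.min(safeCommitLocalCheckpoint + 1, globalCheckpoint + 1 - extraRetainedOps);
}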
@@ -266,21 +292,26 @@ public void testDeleteInvalidCommits() throws Exception { for (int i = 0; i < invalidCommits - 1; i++) { verify(commitList.get(i), times(1)).delete(); } + assertThat(softDeletesPolicy.getMinRetainedSeqNo(), + equalTo(getLocalCheckpoint(CombinedDeletionPolicy.findSafeCommitPoint(commitList, globalCheckpoint.get())) + 1)); } public void testCheckUnreferencedCommits() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); + final SoftDeletesPolicy softDeletesPolicy = new SoftDeletesPolicy(globalCheckpoint::get, -1, 0); final UUID translogUUID = UUID.randomUUID(); final TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, softDeletesPolicy, globalCheckpoint::get); final List commitList = new ArrayList<>(); int totalCommits = between(2, 20); long lastMaxSeqNo = between(1, 1000); + long lastCheckpoint = randomLongBetween(-1, lastMaxSeqNo); long lastTranslogGen = between(1, 50); for (int i = 0; i < totalCommits; i++) { lastMaxSeqNo += between(1, 10000); lastTranslogGen += between(1, 100); - commitList.add(mockIndexCommit(lastMaxSeqNo, translogUUID, lastTranslogGen)); + lastCheckpoint = randomLongBetween(lastCheckpoint, lastMaxSeqNo); + commitList.add(mockIndexCommit(lastCheckpoint, lastMaxSeqNo, translogUUID, lastTranslogGen)); } IndexCommit safeCommit = randomFrom(commitList); globalCheckpoint.set(Long.parseLong(safeCommit.getUserData().get(SequenceNumbers.MAX_SEQ_NO))); @@ -307,8 +338,9 @@ public void testCheckUnreferencedCommits() throws Exception { } } - IndexCommit mockIndexCommit(long maxSeqNo, UUID translogUUID, long translogGen) throws IOException { + IndexCommit mockIndexCommit(long localCheckpoint, long maxSeqNo, UUID translogUUID, long translogGen) throws IOException { final Map userData = new HashMap<>(); + userData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(localCheckpoint)); userData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(maxSeqNo)); userData.put(Translog.TRANSLOG_UUID_KEY, translogUUID.toString()); userData.put(Translog.TRANSLOG_GENERATION_KEY, Long.toString(translogGen)); @@ -329,6 +361,10 @@ void resetDeletion(IndexCommit commit) { }).when(commit).delete(); } + private long getLocalCheckpoint(IndexCommit commit) throws IOException { + return Long.parseLong(commit.getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)); + } + IndexCommit mockLegacyIndexCommit(UUID translogUUID, long translogGen) throws IOException { final Map userData = new HashMap<>(); userData.put(Translog.TRANSLOG_UUID_KEY, translogUUID.toString()); @@ -338,4 +374,5 @@ IndexCommit mockLegacyIndexCommit(UUID translogUUID, long translogGen) throws IO resetDeletion(commit); return commit; } + } diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 8c03e3a41b5d6..2c5ecc8f77d26 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.engine; +import java.io.Closeable; import java.io.IOException; import java.io.UncheckedIOException; import java.nio.charset.Charset; @@ -77,10 +78,12 @@ import 
org.apache.lucene.index.LiveIndexWriterConfig; import org.apache.lucene.index.LogByteSizeMergePolicy; import org.apache.lucene.index.LogDocMergePolicy; +import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.PointValues; import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.index.SoftDeletesRetentionMergePolicy; import org.apache.lucene.index.Term; import org.apache.lucene.index.TieredMergePolicy; import org.apache.lucene.search.IndexSearcher; @@ -114,6 +117,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver; @@ -133,6 +137,7 @@ import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.Mapper.BuilderContext; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; @@ -172,8 +177,10 @@ import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.isIn; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; @@ -247,8 +254,13 @@ public void testVersionMapAfterAutoIDDocument() throws IOException { } public void testSegments() throws Exception { + Settings settings = Settings.builder() + .put(defaultSettings.getSettings()) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false).build(); + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings( + IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(settings).build()); try (Store store = createStore(); - InternalEngine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) { + InternalEngine engine = createEngine(config(indexSettings, store, createTempDir(), NoMergePolicy.INSTANCE, null))) { List segments = engine.segments(false); assertThat(segments.isEmpty(), equalTo(true)); assertThat(engine.segmentsStats(false).getCount(), equalTo(0L)); @@ -1312,9 +1324,13 @@ public void testVersioningNewIndex() throws IOException { assertThat(indexResult.getVersion(), equalTo(1L)); } - public void testForceMerge() throws IOException { + public void testForceMergeWithoutSoftDeletes() throws IOException { + Settings settings = Settings.builder() + .put(defaultSettings.getSettings()) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false).build(); + IndexMetaData indexMetaData = IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(settings).build(); try (Store store = createStore(); - Engine engine = createEngine(config(defaultSettings, store, createTempDir(), + Engine engine = createEngine(config(IndexSettingsModule.newIndexSettings(indexMetaData), store, createTempDir(), new LogByteSizeMergePolicy(), null))) { // use 
log MP here we test some behavior in ESMP int numDocs = randomIntBetween(10, 100); for (int i = 0; i < numDocs; i++) { @@ -1355,6 +1371,165 @@ public void testForceMerge() throws IOException { } } + public void testForceMergeWithSoftDeletesRetention() throws Exception { + final long retainedExtraOps = randomLongBetween(0, 10); + Settings.Builder settings = Settings.builder() + .put(defaultSettings.getSettings()) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), retainedExtraOps); + final IndexMetaData indexMetaData = IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(settings).build(); + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetaData); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + final MapperService mapperService = createMapperService("test"); + final Set liveDocs = new HashSet<>(); + try (Store store = createStore(); + InternalEngine engine = createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get))) { + int numDocs = scaledRandomIntBetween(10, 100); + for (int i = 0; i < numDocs; i++) { + ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null); + engine.index(indexForDoc(doc)); + liveDocs.add(doc.id()); + } + for (int i = 0; i < numDocs; i++) { + ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null); + if (randomBoolean()) { + engine.delete(new Engine.Delete(doc.type(), doc.id(), newUid(doc.id()), primaryTerm.get())); + liveDocs.remove(doc.id()); + } + if (randomBoolean()) { + engine.index(indexForDoc(doc)); + liveDocs.add(doc.id()); + } + if (randomBoolean()) { + engine.flush(randomBoolean(), true); + } + } + engine.flush(); + + long localCheckpoint = engine.getLocalCheckpoint(); + globalCheckpoint.set(randomLongBetween(0, localCheckpoint)); + engine.syncTranslog(); + final long safeCommitCheckpoint; + try (Engine.IndexCommitRef safeCommit = engine.acquireSafeIndexCommit()) { + safeCommitCheckpoint = Long.parseLong(safeCommit.getIndexCommit().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)); + } + engine.forceMerge(true, 1, false, false, false); + assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService); + Map ops = readAllOperationsInLucene(engine, mapperService) + .stream().collect(Collectors.toMap(Translog.Operation::seqNo, Function.identity())); + for (long seqno = 0; seqno <= localCheckpoint; seqno++) { + long minSeqNoToRetain = Math.min(globalCheckpoint.get() + 1 - retainedExtraOps, safeCommitCheckpoint + 1); + String msg = "seq# [" + seqno + "], global checkpoint [" + globalCheckpoint + "], retained-ops [" + retainedExtraOps + "]"; + if (seqno < minSeqNoToRetain) { + Translog.Operation op = ops.get(seqno); + if (op != null) { + assertThat(op, instanceOf(Translog.Index.class)); + assertThat(msg, ((Translog.Index) op).id(), isIn(liveDocs)); + assertEquals(msg, ((Translog.Index) op).source(), B_1); + } + } else { + assertThat(msg, ops.get(seqno), notNullValue()); + } + } + settings.put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0); + indexSettings.updateIndexMetaData(IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(settings).build()); + engine.onSettingsChanged(); + globalCheckpoint.set(localCheckpoint); + engine.syncTranslog(); + + engine.forceMerge(true, 1, false, false, false); + 
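// with the extra retained operations dropped to zero and the global checkpoint advanced to the local
// checkpoint, this force-merge is expected to prune every soft-deleted document, leaving exactly one
// Lucene operation per live doc (checked by the hasSize(liveDocs.size()) assertion below)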
assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService); + assertThat(readAllOperationsInLucene(engine, mapperService), hasSize(liveDocs.size())); + } + } + + public void testForceMergeWithSoftDeletesRetentionAndRecoverySource() throws Exception { + final long retainedExtraOps = randomLongBetween(0, 10); + Settings.Builder settings = Settings.builder() + .put(defaultSettings.getSettings()) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), retainedExtraOps); + final IndexMetaData indexMetaData = IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(settings).build(); + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetaData); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + final MapperService mapperService = createMapperService("test"); + final boolean omitSourceAllTheTime = randomBoolean(); + final Set liveDocs = new HashSet<>(); + final Set liveDocsWithSource = new HashSet<>(); + try (Store store = createStore(); + InternalEngine engine = createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, + globalCheckpoint::get))) { + int numDocs = scaledRandomIntBetween(10, 100); + for (int i = 0; i < numDocs; i++) { + boolean useRecoverySource = randomBoolean() || omitSourceAllTheTime; + ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null, useRecoverySource); + engine.index(indexForDoc(doc)); + liveDocs.add(doc.id()); + if (useRecoverySource == false) { + liveDocsWithSource.add(Integer.toString(i)); + } + } + for (int i = 0; i < numDocs; i++) { + boolean useRecoverySource = randomBoolean() || omitSourceAllTheTime; + ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null, useRecoverySource); + if (randomBoolean()) { + engine.delete(new Engine.Delete(doc.type(), doc.id(), newUid(doc.id()), primaryTerm.get())); + liveDocs.remove(doc.id()); + liveDocsWithSource.remove(doc.id()); + } + if (randomBoolean()) { + engine.index(indexForDoc(doc)); + liveDocs.add(doc.id()); + if (useRecoverySource == false) { + liveDocsWithSource.add(doc.id()); + } else { + liveDocsWithSource.remove(doc.id()); + } + } + if (randomBoolean()) { + engine.flush(randomBoolean(), true); + } + } + engine.flush(); + globalCheckpoint.set(randomLongBetween(0, engine.getLocalCheckpoint())); + engine.syncTranslog(); + final long minSeqNoToRetain; + try (Engine.IndexCommitRef safeCommit = engine.acquireSafeIndexCommit()) { + long safeCommitLocalCheckpoint = Long.parseLong( + safeCommit.getIndexCommit().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)); + minSeqNoToRetain = Math.min(globalCheckpoint.get() + 1 - retainedExtraOps, safeCommitLocalCheckpoint + 1); + } + engine.forceMerge(true, 1, false, false, false); + assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService); + Map ops = readAllOperationsInLucene(engine, mapperService) + .stream().collect(Collectors.toMap(Translog.Operation::seqNo, Function.identity())); + for (long seqno = 0; seqno <= engine.getLocalCheckpoint(); seqno++) { + String msg = "seq# [" + seqno + "], global checkpoint [" + globalCheckpoint + "], retained-ops [" + retainedExtraOps + "]"; + if (seqno < minSeqNoToRetain) { + Translog.Operation op = ops.get(seqno); + if (op != null) { + assertThat(op, instanceOf(Translog.Index.class)); + assertThat(msg, ((Translog.Index) op).id(), 
isIn(liveDocs)); + } + } else { + Translog.Operation op = ops.get(seqno); + assertThat(msg, op, notNullValue()); + if (op instanceof Translog.Index) { + assertEquals(msg, ((Translog.Index) op).source(), B_1); + } + } + } + settings.put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0); + indexSettings.updateIndexMetaData(IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(settings).build()); + engine.onSettingsChanged(); + globalCheckpoint.set(engine.getLocalCheckpoint()); + engine.syncTranslog(); + engine.forceMerge(true, 1, false, false, false); + assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService); + assertThat(readAllOperationsInLucene(engine, mapperService), hasSize(liveDocsWithSource.size())); + } + } + public void testForceMergeAndClose() throws IOException, InterruptedException { int numIters = randomIntBetween(2, 10); for (int j = 0; j < numIters; j++) { @@ -1423,66 +1598,10 @@ public void testVersioningCreateExistsException() throws IOException { assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class)); } - protected List generateSingleDocHistory(boolean forReplica, VersionType versionType, - boolean partialOldPrimary, long primaryTerm, - int minOpCount, int maxOpCount, String docId) { - final int numOfOps = randomIntBetween(minOpCount, maxOpCount); - final List ops = new ArrayList<>(); - final Term id = newUid(docId); - final int startWithSeqNo; - if (partialOldPrimary) { - startWithSeqNo = randomBoolean() ? numOfOps - 1 : randomIntBetween(0, numOfOps - 1); - } else { - startWithSeqNo = 0; - } - final String valuePrefix = (forReplica ? "r_" : "p_" ) + docId + "_"; - final boolean incrementTermWhenIntroducingSeqNo = randomBoolean(); - for (int i = 0; i < numOfOps; i++) { - final Engine.Operation op; - final long version; - switch (versionType) { - case INTERNAL: - version = forReplica ? i : Versions.MATCH_ANY; - break; - case EXTERNAL: - version = i; - break; - case EXTERNAL_GTE: - version = randomBoolean() ? Math.max(i - 1, 0) : i; - break; - case FORCE: - version = randomNonNegativeLong(); - break; - default: - throw new UnsupportedOperationException("unknown version type: " + versionType); - } - if (randomBoolean()) { - op = new Engine.Index(id, testParsedDocument(docId, null, testDocumentWithTextField(valuePrefix + i), B_1, null), - forReplica && i >= startWithSeqNo ? i * 2 : SequenceNumbers.UNASSIGNED_SEQ_NO, - forReplica && i >= startWithSeqNo && incrementTermWhenIntroducingSeqNo ? primaryTerm + 1 : primaryTerm, - version, - forReplica ? versionType.versionTypeForReplicationAndRecovery() : versionType, - forReplica ? REPLICA : PRIMARY, - System.currentTimeMillis(), -1, false - ); - } else { - op = new Engine.Delete("test", docId, id, - forReplica && i >= startWithSeqNo ? i * 2 : SequenceNumbers.UNASSIGNED_SEQ_NO, - forReplica && i >= startWithSeqNo && incrementTermWhenIntroducingSeqNo ? primaryTerm + 1 : primaryTerm, - version, - forReplica ? versionType.versionTypeForReplicationAndRecovery() : versionType, - forReplica ? 
REPLICA : PRIMARY, - System.currentTimeMillis()); - } - ops.add(op); - } - return ops; - } - public void testOutOfOrderDocsOnReplica() throws IOException { final List ops = generateSingleDocHistory(true, randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL, VersionType.EXTERNAL_GTE, VersionType.FORCE), false, 2, 2, 20, "1"); - assertOpsOnReplica(ops, replicaEngine, true); + assertOpsOnReplica(ops, replicaEngine, true, logger); } public void testOutOfOrderDocsOnReplicaOldPrimary() throws IOException { @@ -1498,74 +1617,8 @@ public void testOutOfOrderDocsOnReplicaOldPrimary() throws IOException { try (Store oldReplicaStore = createStore(); InternalEngine replicaEngine = createEngine(oldSettings, oldReplicaStore, createTempDir("translog-old-replica"), newMergePolicy())) { - final List ops = - generateSingleDocHistory(true, randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL), true, 2, 2, 20, "1"); - assertOpsOnReplica(ops, replicaEngine, true); - } - } - - private void assertOpsOnReplica(List ops, InternalEngine replicaEngine, boolean shuffleOps) throws IOException { - final Engine.Operation lastOp = ops.get(ops.size() - 1); - final String lastFieldValue; - if (lastOp instanceof Engine.Index) { - Engine.Index index = (Engine.Index) lastOp; - lastFieldValue = index.docs().get(0).get("value"); - } else { - // delete - lastFieldValue = null; - } - if (shuffleOps) { - int firstOpWithSeqNo = 0; - while (firstOpWithSeqNo < ops.size() && ops.get(firstOpWithSeqNo).seqNo() < 0) { - firstOpWithSeqNo++; - } - // shuffle ops but make sure legacy ops are first - shuffle(ops.subList(0, firstOpWithSeqNo), random()); - shuffle(ops.subList(firstOpWithSeqNo, ops.size()), random()); - } - boolean firstOp = true; - for (Engine.Operation op : ops) { - logger.info("performing [{}], v [{}], seq# [{}], term [{}]", - op.operationType().name().charAt(0), op.version(), op.seqNo(), op.primaryTerm()); - if (op instanceof Engine.Index) { - Engine.IndexResult result = replicaEngine.index((Engine.Index) op); - // replicas don't really care to about creation status of documents - // this allows to ignore the case where a document was found in the live version maps in - // a delete state and return false for the created flag in favor of code simplicity - // as deleted or not. This check is just signal regression so a decision can be made if it's - // intentional - assertThat(result.isCreated(), equalTo(firstOp)); - assertThat(result.getVersion(), equalTo(op.version())); - assertThat(result.getResultType(), equalTo(Engine.Result.Type.SUCCESS)); - - } else { - Engine.DeleteResult result = replicaEngine.delete((Engine.Delete) op); - // Replicas don't really care to about found status of documents - // this allows to ignore the case where a document was found in the live version maps in - // a delete state and return true for the found flag in favor of code simplicity - // his check is just signal regression so a decision can be made if it's - // intentional - assertThat(result.isFound(), equalTo(firstOp == false)); - assertThat(result.getVersion(), equalTo(op.version())); - assertThat(result.getResultType(), equalTo(Engine.Result.Type.SUCCESS)); - } - if (randomBoolean()) { - engine.refresh("test"); - } - if (randomBoolean()) { - engine.flush(); - engine.refresh("test"); - } - firstOp = false; - } - - assertVisibleCount(replicaEngine, lastFieldValue == null ? 
0 : 1); - if (lastFieldValue != null) { - try (Searcher searcher = replicaEngine.acquireSearcher("test")) { - final TotalHitCountCollector collector = new TotalHitCountCollector(); - searcher.searcher().search(new TermQuery(new Term("value", lastFieldValue)), collector); - assertThat(collector.getTotalHits(), equalTo(1)); - } + final List ops = generateSingleDocHistory(true, randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL), true, 2, 2, 20, "1"); + assertOpsOnReplica(ops, replicaEngine, true, logger); } } @@ -1594,11 +1647,12 @@ public void testConcurrentOutOfOrderDocsOnReplica() throws IOException, Interrup } // randomly interleave final AtomicLong seqNoGenerator = new AtomicLong(); - Function seqNoUpdater = operation -> { - final long newSeqNo = seqNoGenerator.getAndIncrement(); + BiFunction seqNoUpdater = (operation, newSeqNo) -> { if (operation instanceof Engine.Index) { Engine.Index index = (Engine.Index) operation; - return new Engine.Index(index.uid(), index.parsedDoc(), newSeqNo, index.primaryTerm(), index.version(), + Document doc = testDocumentWithTextField(index.docs().get(0).get("value")); + ParsedDocument parsedDocument = testParsedDocument(index.id(), index.routing(), doc, index.source(), null); + return new Engine.Index(index.uid(), parsedDocument, newSeqNo, index.primaryTerm(), index.version(), index.versionType(), index.origin(), index.startTime(), index.getAutoGeneratedIdTimestamp(), index.isRetry()); } else { Engine.Delete delete = (Engine.Delete) operation; @@ -1611,12 +1665,12 @@ public void testConcurrentOutOfOrderDocsOnReplica() throws IOException, Interrup Iterator iter2 = opsDoc2.iterator(); while (iter1.hasNext() && iter2.hasNext()) { final Engine.Operation next = randomBoolean() ? iter1.next() : iter2.next(); - allOps.add(seqNoUpdater.apply(next)); + allOps.add(seqNoUpdater.apply(next, seqNoGenerator.getAndIncrement())); } - iter1.forEachRemaining(o -> allOps.add(seqNoUpdater.apply(o))); - iter2.forEachRemaining(o -> allOps.add(seqNoUpdater.apply(o))); + iter1.forEachRemaining(o -> allOps.add(seqNoUpdater.apply(o, seqNoGenerator.getAndIncrement()))); + iter2.forEachRemaining(o -> allOps.add(seqNoUpdater.apply(o, seqNoGenerator.getAndIncrement()))); // insert some duplicates - allOps.addAll(randomSubsetOf(allOps)); + randomSubsetOf(allOps).forEach(op -> allOps.add(seqNoUpdater.apply(op, op.seqNo()))); shuffle(allOps, random()); concurrentlyApplyOps(allOps, engine); @@ -1648,42 +1702,6 @@ public void testConcurrentOutOfOrderDocsOnReplica() throws IOException, Interrup assertVisibleCount(engine, totalExpectedOps); } - private void concurrentlyApplyOps(List ops, InternalEngine engine) throws InterruptedException { - Thread[] thread = new Thread[randomIntBetween(3, 5)]; - CountDownLatch startGun = new CountDownLatch(thread.length); - AtomicInteger offset = new AtomicInteger(-1); - for (int i = 0; i < thread.length; i++) { - thread[i] = new Thread(() -> { - startGun.countDown(); - try { - startGun.await(); - } catch (InterruptedException e) { - throw new AssertionError(e); - } - int docOffset; - while ((docOffset = offset.incrementAndGet()) < ops.size()) { - try { - final Engine.Operation op = ops.get(docOffset); - if (op instanceof Engine.Index) { - engine.index((Engine.Index) op); - } else { - engine.delete((Engine.Delete) op); - } - if ((docOffset + 1) % 4 == 0) { - engine.refresh("test"); - } - } catch (IOException e) { - throw new AssertionError(e); - } - } - }); - thread[i].start(); - } - for (int i = 0; i < thread.length; i++) { - thread[i].join(); - } - } 
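The seqNoUpdater change above (a BiFunction instead of a Function) exists so that a fresh seq# is drawn exactly once per logical operation, while the duplicates appended afterwards are rebuilt with the seq# they were already assigned; that is what lets the engine treat them as redeliveries rather than new operations. A stripped-down sketch of that assignment pattern, using a throwaway Op class in place of Engine.Operation:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.BiFunction;

public class SeqNoAssignmentSketch {
    // Toy stand-in for Engine.Operation: just an id and the seq# it was assigned.
    static final class Op {
        final String id;
        final long seqNo;
        Op(String id, long seqNo) { this.id = id; this.seqNo = seqNo; }
    }

    public static void main(String[] args) {
        AtomicLong seqNoGenerator = new AtomicLong();
        // The seq# is passed in explicitly, so the caller decides whether to draw a fresh one
        // (first delivery) or reuse the one the operation already owns (duplicate delivery).
        BiFunction<Op, Long, Op> seqNoUpdater = (op, seqNo) -> new Op(op.id, seqNo);

        List<Op> allOps = new ArrayList<>();
        for (String id : Arrays.asList("a", "b", "c")) {
            allOps.add(seqNoUpdater.apply(new Op(id, -1), seqNoGenerator.getAndIncrement()));
        }
        Op duplicate = allOps.get(1);
        allOps.add(seqNoUpdater.apply(duplicate, duplicate.seqNo)); // a redelivery keeps its seq#
        allOps.forEach(op -> System.out.println(op.id + " -> seq# " + op.seqNo));
    }
}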
- public void testInternalVersioningOnPrimary() throws IOException { final List ops = generateSingleDocHistory(false, VersionType.INTERNAL, false, 2, 2, 20, "1"); assertOpsOnPrimary(ops, Versions.NOT_FOUND, true, engine); @@ -1894,7 +1912,7 @@ public void testVersioningPromotedReplica() throws IOException { final boolean deletedOnReplica = lastReplicaOp instanceof Engine.Delete; final long finalReplicaVersion = lastReplicaOp.version(); final long finalReplicaSeqNo = lastReplicaOp.seqNo(); - assertOpsOnReplica(replicaOps, replicaEngine, true); + assertOpsOnReplica(replicaOps, replicaEngine, true, logger); final int opsOnPrimary = assertOpsOnPrimary(primaryOps, finalReplicaVersion, deletedOnReplica, replicaEngine); final long currentSeqNo = getSequenceID(replicaEngine, new Engine.Get(false, false, "type", lastReplicaOp.uid().text(), lastReplicaOp.uid())).v1(); @@ -2699,14 +2717,16 @@ public void testSkipTranslogReplay() throws IOException { Engine.IndexResult indexResult = engine.index(firstIndexRequest); assertThat(indexResult.getVersion(), equalTo(1L)); } + EngineConfig config = engine.config(); assertVisibleCount(engine, numDocs); engine.close(); - trimUnsafeCommits(engine.config()); - engine = new InternalEngine(engine.config()); - engine.skipTranslogRecovery(); - try (Engine.Searcher searcher = engine.acquireSearcher("test")) { - TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10)); - assertThat(topDocs.totalHits, equalTo(0L)); + trimUnsafeCommits(config); + try (InternalEngine engine = new InternalEngine(config)) { + engine.skipTranslogRecovery(); + try (Engine.Searcher searcher = engine.acquireSearcher("test")) { + TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10)); + assertThat(topDocs.totalHits, equalTo(0L)); + } } } @@ -2836,7 +2856,7 @@ public void testRecoverFromForeignTranslog() throws IOException { new CodecService(null, logger), config.getEventListener(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), config.getExternalRefreshListener(), config.getInternalRefreshListener(), null, config.getTranslogRecoveryRunner(), - new NoneCircuitBreakerService(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm::get); + new NoneCircuitBreakerService(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm::get, tombstoneDocSupplier()); try { InternalEngine internalEngine = new InternalEngine(brokenConfig); fail("translog belongs to a different engine"); @@ -2965,6 +2985,12 @@ private void maybeThrowFailure() throws IOException { } } + @Override + public long softUpdateDocument(Term term, Iterable doc, Field... softDeletes) throws IOException { + maybeThrowFailure(); + return super.softUpdateDocument(term, doc, softDeletes); + } + @Override public long deleteDocuments(Term... 
terms) throws IOException { maybeThrowFailure(); @@ -3165,10 +3191,10 @@ public void testDoubleDeliveryReplicaAppendingAndDeleteOnly() throws IOException } public void testDoubleDeliveryReplicaAppendingOnly() throws IOException { - final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), + final Supplier doc = () -> testParsedDocument("1", null, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null); - Engine.Index operation = appendOnlyReplica(doc, false, 1, randomIntBetween(0, 5)); - Engine.Index retry = appendOnlyReplica(doc, true, 1, randomIntBetween(0, 5)); + Engine.Index operation = appendOnlyReplica(doc.get(), false, 1, randomIntBetween(0, 5)); + Engine.Index retry = appendOnlyReplica(doc.get(), true, 1, randomIntBetween(0, 5)); // operations with a seq# equal or lower to the local checkpoint are not indexed to lucene // and the version lookup is skipped final boolean belowLckp = operation.seqNo() == 0 && retry.seqNo() == 0; @@ -3207,8 +3233,8 @@ public void testDoubleDeliveryReplicaAppendingOnly() throws IOException { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); assertEquals(1, topDocs.totalHits); } - operation = randomAppendOnly(doc, false, 1); - retry = randomAppendOnly(doc, true, 1); + operation = randomAppendOnly(doc.get(), false, 1); + retry = randomAppendOnly(doc.get(), true, 1); if (randomBoolean()) { Engine.IndexResult indexResult = engine.index(operation); assertNotNull(indexResult.getTranslogLocation()); @@ -3273,6 +3299,8 @@ public void testDoubleDeliveryReplica() throws IOException { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); assertEquals(1, topDocs.totalHits); } + List ops = readAllOperationsInLucene(engine, createMapperService("test")); + assertThat(ops.stream().map(o -> o.seqNo()).collect(Collectors.toList()), hasItem(20L)); } public void testRetryWithAutogeneratedIdWorksAndNoDuplicateDocs() throws IOException { @@ -3740,20 +3768,22 @@ public void testOutOfOrderSequenceNumbersWithVersionConflict() throws IOExceptio final List operations = new ArrayList<>(); final int numberOfOperations = randomIntBetween(16, 32); - final Document document = testDocumentWithTextField(); final AtomicLong sequenceNumber = new AtomicLong(); final Engine.Operation.Origin origin = randomFrom(LOCAL_TRANSLOG_RECOVERY, PEER_RECOVERY, PRIMARY, REPLICA); final LongSupplier sequenceNumberSupplier = origin == PRIMARY ? 
() -> SequenceNumbers.UNASSIGNED_SEQ_NO : sequenceNumber::getAndIncrement; - document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE)); - final ParsedDocument doc = testParsedDocument("1", null, document, B_1, null); - final Term uid = newUid(doc); + final Supplier doc = () -> { + final Document document = testDocumentWithTextField(); + document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE)); + return testParsedDocument("1", null, document, B_1, null); + }; + final Term uid = newUid("1"); final BiFunction searcherFactory = engine::acquireSearcher; for (int i = 0; i < numberOfOperations; i++) { if (randomBoolean()) { final Engine.Index index = new Engine.Index( uid, - doc, + doc.get(), sequenceNumberSupplier.getAsLong(), 1, i, @@ -3829,7 +3859,9 @@ public void testNoOps() throws IOException { maxSeqNo, localCheckpoint); trimUnsafeCommits(engine.config()); - noOpEngine = new InternalEngine(engine.config(), supplier) { + EngineConfig noopEngineConfig = copy(engine.config(), new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD, + () -> new MatchAllDocsQuery(), engine.config().getMergePolicy())); + noOpEngine = new InternalEngine(noopEngineConfig, supplier) { @Override protected long doGenerateSeqNoForOperation(Operation operation) { throw new UnsupportedOperationException(); @@ -3837,7 +3869,7 @@ protected long doGenerateSeqNoForOperation(Operation operation) { }; noOpEngine.recoverFromTranslog(Long.MAX_VALUE); final int gapsFilled = noOpEngine.fillSeqNoGaps(primaryTerm.get()); - final String reason = randomAlphaOfLength(16); + final String reason = "filling gaps"; noOpEngine.noOp(new Engine.NoOp(maxSeqNo + 1, primaryTerm.get(), LOCAL_TRANSLOG_RECOVERY, System.nanoTime(), reason)); assertThat(noOpEngine.getLocalCheckpoint(), equalTo((long) (maxSeqNo + 1))); assertThat(noOpEngine.getTranslog().stats().getUncommittedOperations(), equalTo(gapsFilled)); @@ -3859,11 +3891,77 @@ protected long doGenerateSeqNoForOperation(Operation operation) { assertThat(noOp.seqNo(), equalTo((long) (maxSeqNo + 2))); assertThat(noOp.primaryTerm(), equalTo(primaryTerm.get())); assertThat(noOp.reason(), equalTo(reason)); + if (engine.engineConfig.getIndexSettings().isSoftDeleteEnabled()) { + MapperService mapperService = createMapperService("test"); + List operationsFromLucene = readAllOperationsInLucene(noOpEngine, mapperService); + assertThat(operationsFromLucene, hasSize(maxSeqNo + 2 - localCheckpoint)); // fills n gap and 2 manual noop. + for (int i = 0; i < operationsFromLucene.size(); i++) { + assertThat(operationsFromLucene.get(i), equalTo(new Translog.NoOp(localCheckpoint + 1 + i, primaryTerm.get(), "filling gaps"))); + } + assertConsistentHistoryBetweenTranslogAndLuceneIndex(noOpEngine, mapperService); + } } finally { IOUtils.close(noOpEngine); } } + /** + * Verifies that a segment containing only no-ops can be used to look up _version and _seqno. 
+ */ + public void testSegmentContainsOnlyNoOps() throws Exception { + Engine.NoOpResult noOpResult = engine.noOp(new Engine.NoOp(1, primaryTerm.get(), + randomFrom(Engine.Operation.Origin.values()), randomNonNegativeLong(), "test")); + assertThat(noOpResult.getFailure(), nullValue()); + engine.refresh("test"); + Engine.DeleteResult deleteResult = engine.delete(replicaDeleteForDoc("id", 1, 2, randomNonNegativeLong())); + assertThat(deleteResult.getFailure(), nullValue()); + engine.refresh("test"); + } + + /** + * A simple test to check that random combination of operations can coexist in segments and be lookup. + * This is needed as some fields in Lucene may not exist if a segment misses operation types and this code is to check for that. + * For example, a segment containing only no-ops does not have neither _uid or _version. + */ + public void testRandomOperations() throws Exception { + int numOps = between(10, 100); + for (int i = 0; i < numOps; i++) { + String id = Integer.toString(randomIntBetween(1, 10)); + ParsedDocument doc = createParsedDoc(id, null); + Engine.Operation.TYPE type = randomFrom(Engine.Operation.TYPE.values()); + switch (type) { + case INDEX: + Engine.IndexResult index = engine.index(replicaIndexForDoc(doc, between(1, 100), i, randomBoolean())); + assertThat(index.getFailure(), nullValue()); + break; + case DELETE: + Engine.DeleteResult delete = engine.delete(replicaDeleteForDoc(doc.id(), between(1, 100), i, randomNonNegativeLong())); + assertThat(delete.getFailure(), nullValue()); + break; + case NO_OP: + Engine.NoOpResult noOp = engine.noOp(new Engine.NoOp(i, primaryTerm.get(), + randomFrom(Engine.Operation.Origin.values()), randomNonNegativeLong(), "")); + assertThat(noOp.getFailure(), nullValue()); + break; + default: + throw new IllegalStateException("Invalid op [" + type + "]"); + } + if (randomBoolean()) { + engine.refresh("test"); + } + if (randomBoolean()) { + engine.flush(); + } + if (randomBoolean()) { + engine.forceMerge(randomBoolean(), between(1, 10), randomBoolean(), false, false); + } + } + if (engine.engineConfig.getIndexSettings().isSoftDeleteEnabled()) { + List operations = readAllOperationsInLucene(engine, createMapperService("test")); + assertThat(operations, hasSize(numOps)); + } + } + public void testMinGenerationForSeqNo() throws IOException, BrokenBarrierException, InterruptedException { engine.close(); final int numberOfTriplets = randomIntBetween(1, 32); @@ -4429,7 +4527,7 @@ public void testCleanUpCommitsWhenGlobalCheckpointAdvanced() throws Exception { globalCheckpoint.set(randomLongBetween(engine.getLocalCheckpoint(), Long.MAX_VALUE)); engine.syncTranslog(); assertThat(DirectoryReader.listCommits(store.directory()), contains(commits.get(commits.size() - 1))); - assertThat(engine.estimateTranslogOperationsFromMinSeq(0L), equalTo(0)); + assertThat(engine.getTranslog().totalOperations(), equalTo(0)); } } @@ -4792,6 +4890,154 @@ public void testTrimUnsafeCommits() throws Exception { } } + public void testLuceneHistoryOnPrimary() throws Exception { + final List operations = generateSingleDocHistory(false, + randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL), false, 2, 10, 300, "id"); + assertOperationHistoryInLucene(operations); + } + + public void testLuceneHistoryOnReplica() throws Exception { + final List operations = generateSingleDocHistory(true, + randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL), false, 2, 10, 300, "id"); + Randomness.shuffle(operations); + assertOperationHistoryInLucene(operations); + } + + private void 
assertOperationHistoryInLucene(List operations) throws IOException { + final MergePolicy keepSoftDeleteDocsMP = new SoftDeletesRetentionMergePolicy( + Lucene.SOFT_DELETES_FIELD, () -> new MatchAllDocsQuery(), engine.config().getMergePolicy()); + Settings.Builder settings = Settings.builder() + .put(defaultSettings.getSettings()) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), randomLongBetween(0, 10)); + final IndexMetaData indexMetaData = IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(settings).build(); + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetaData); + Set expectedSeqNos = new HashSet<>(); + try (Store store = createStore(); + Engine engine = createEngine(config(indexSettings, store, createTempDir(), keepSoftDeleteDocsMP, null))) { + for (Engine.Operation op : operations) { + if (op instanceof Engine.Index) { + Engine.IndexResult indexResult = engine.index((Engine.Index) op); + assertThat(indexResult.getFailure(), nullValue()); + expectedSeqNos.add(indexResult.getSeqNo()); + } else { + Engine.DeleteResult deleteResult = engine.delete((Engine.Delete) op); + assertThat(deleteResult.getFailure(), nullValue()); + expectedSeqNos.add(deleteResult.getSeqNo()); + } + if (rarely()) { + engine.refresh("test"); + } + if (rarely()) { + engine.flush(); + } + if (rarely()) { + engine.forceMerge(true); + } + } + MapperService mapperService = createMapperService("test"); + List actualOps = readAllOperationsInLucene(engine, mapperService); + assertThat(actualOps.stream().map(o -> o.seqNo()).collect(Collectors.toList()), containsInAnyOrder(expectedSeqNos.toArray())); + assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService); + } + } + + public void testKeepMinRetainedSeqNoByMergePolicy() throws IOException { + IOUtils.close(engine, store); + Settings.Builder settings = Settings.builder() + .put(defaultSettings.getSettings()) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), randomLongBetween(0, 10)); + final IndexMetaData indexMetaData = IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(settings).build(); + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetaData); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + final List operations = generateSingleDocHistory(true, + randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL), false, 2, 10, 300, "2"); + Randomness.shuffle(operations); + Set existingSeqNos = new HashSet<>(); + store = createStore(); + engine = createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get)); + assertThat(engine.getMinRetainedSeqNo(), equalTo(0L)); + long lastMinRetainedSeqNo = engine.getMinRetainedSeqNo(); + for (Engine.Operation op : operations) { + final Engine.Result result; + if (op instanceof Engine.Index) { + result = engine.index((Engine.Index) op); + } else { + result = engine.delete((Engine.Delete) op); + } + existingSeqNos.add(result.getSeqNo()); + if (randomBoolean()) { + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpointTracker().getCheckpoint())); + } + if (rarely()) { + settings.put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), randomLongBetween(0, 10)); + 
indexSettings.updateIndexMetaData(IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(settings).build()); + engine.onSettingsChanged(); + } + if (rarely()) { + engine.refresh("test"); + } + if (rarely()) { + engine.flush(true, true); + assertThat(Long.parseLong(engine.getLastCommittedSegmentInfos().userData.get(Engine.MIN_RETAINED_SEQNO)), + equalTo(engine.getMinRetainedSeqNo())); + } + if (rarely()) { + engine.forceMerge(randomBoolean()); + } + try (Closeable ignored = engine.acquireRetentionLockForPeerRecovery()) { + long minRetainSeqNos = engine.getMinRetainedSeqNo(); + assertThat(minRetainSeqNos, lessThanOrEqualTo(globalCheckpoint.get() + 1)); + Long[] expectedOps = existingSeqNos.stream().filter(seqno -> seqno >= minRetainSeqNos).toArray(Long[]::new); + Set actualOps = readAllOperationsInLucene(engine, createMapperService("test")).stream() + .map(Translog.Operation::seqNo).collect(Collectors.toSet()); + assertThat(actualOps, containsInAnyOrder(expectedOps)); + } + try (Engine.IndexCommitRef commitRef = engine.acquireSafeIndexCommit()) { + IndexCommit safeCommit = commitRef.getIndexCommit(); + if (safeCommit.getUserData().containsKey(Engine.MIN_RETAINED_SEQNO)) { + lastMinRetainedSeqNo = Long.parseLong(safeCommit.getUserData().get(Engine.MIN_RETAINED_SEQNO)); + } + } + } + if (randomBoolean()) { + engine.close(); + } else { + engine.flushAndClose(); + } + trimUnsafeCommits(engine.config()); + try (InternalEngine recoveringEngine = new InternalEngine(engine.config())) { + assertThat(recoveringEngine.getMinRetainedSeqNo(), equalTo(lastMinRetainedSeqNo)); + } + } + + public void testLastRefreshCheckpoint() throws Exception { + AtomicBoolean done = new AtomicBoolean(); + Thread[] refreshThreads = new Thread[between(1, 8)]; + CountDownLatch latch = new CountDownLatch(refreshThreads.length); + for (int i = 0; i < refreshThreads.length; i++) { + latch.countDown(); + refreshThreads[i] = new Thread(() -> { + while (done.get() == false) { + long checkPointBeforeRefresh = engine.getLocalCheckpoint(); + engine.refresh("test", randomFrom(Engine.SearcherScope.values())); + assertThat(engine.lastRefreshedCheckpoint(), greaterThanOrEqualTo(checkPointBeforeRefresh)); + } + }); + refreshThreads[i].start(); + } + latch.await(); + List ops = generateSingleDocHistory(true, VersionType.EXTERNAL, false, 1, 10, 1000, "1"); + concurrentlyApplyOps(ops, engine); + done.set(true); + for (Thread thread : refreshThreads) { + thread.join(); + } + engine.refresh("test"); + assertThat(engine.lastRefreshedCheckpoint(), equalTo(engine.getLocalCheckpoint())); + } + private static void trimUnsafeCommits(EngineConfig config) throws IOException { final Store store = config.getStore(); final TranslogConfig translogConfig = config.getTranslogConfig(); diff --git a/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java b/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java new file mode 100644 index 0000000000000..cf4924f70b475 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java @@ -0,0 +1,289 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.engine; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.translog.SnapshotMatchers; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.test.IndexSettingsModule; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class LuceneChangesSnapshotTests extends EngineTestCase { + private MapperService mapperService; + + @Before + public void createMapper() throws Exception { + mapperService = createMapperService("test"); + } + + @Override + protected Settings indexSettings() { + return Settings.builder().put(super.indexSettings()) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) // always enable soft-deletes + .build(); + } + + public void testBasics() throws Exception { + long fromSeqNo = randomNonNegativeLong(); + long toSeqNo = randomLongBetween(fromSeqNo, Long.MAX_VALUE); + // Empty engine + try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", mapperService, fromSeqNo, toSeqNo, true)) { + IllegalStateException error = expectThrows(IllegalStateException.class, () -> drainAll(snapshot)); + assertThat(error.getMessage(), + containsString("Not all operations between from_seqno [" + fromSeqNo + "] and to_seqno [" + toSeqNo + "] found")); + } + try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", mapperService, fromSeqNo, toSeqNo, false)) { + assertThat(snapshot, SnapshotMatchers.size(0)); + } + int numOps = between(1, 100); + int refreshedSeqNo = -1; + for (int i = 0; i < numOps; i++) { + String id = Integer.toString(randomIntBetween(i, i + 5)); + ParsedDocument doc = createParsedDoc(id, null, randomBoolean()); + if (randomBoolean()) { + engine.index(indexForDoc(doc)); + } else { + engine.delete(new Engine.Delete(doc.type(), doc.id(), newUid(doc.id()), primaryTerm.get())); + } + if (rarely()) { + if (randomBoolean()) { + engine.flush(); + } else { + engine.refresh("test"); + } + refreshedSeqNo = i; + } + } + if (refreshedSeqNo == -1) { + fromSeqNo = between(0, numOps); + toSeqNo = randomLongBetween(fromSeqNo, numOps * 2); + + Engine.Searcher searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL); + try (Translog.Snapshot snapshot = new LuceneChangesSnapshot( + searcher, mapperService, between(1, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE), fromSeqNo, toSeqNo, false)) { + searcher = null; + assertThat(snapshot, SnapshotMatchers.size(0)); + } finally { + IOUtils.close(searcher); + } + + 
searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL); + try (Translog.Snapshot snapshot = new LuceneChangesSnapshot( + searcher, mapperService, between(1, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE), fromSeqNo, toSeqNo, true)) { + searcher = null; + IllegalStateException error = expectThrows(IllegalStateException.class, () -> drainAll(snapshot)); + assertThat(error.getMessage(), + containsString("Not all operations between from_seqno [" + fromSeqNo + "] and to_seqno [" + toSeqNo + "] found")); + }finally { + IOUtils.close(searcher); + } + } else { + fromSeqNo = randomLongBetween(0, refreshedSeqNo); + toSeqNo = randomLongBetween(refreshedSeqNo + 1, numOps * 2); + Engine.Searcher searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL); + try (Translog.Snapshot snapshot = new LuceneChangesSnapshot( + searcher, mapperService, between(1, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE), fromSeqNo, toSeqNo, false)) { + searcher = null; + assertThat(snapshot, SnapshotMatchers.containsSeqNoRange(fromSeqNo, refreshedSeqNo)); + } finally { + IOUtils.close(searcher); + } + searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL); + try (Translog.Snapshot snapshot = new LuceneChangesSnapshot( + searcher, mapperService, between(1, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE), fromSeqNo, toSeqNo, true)) { + searcher = null; + IllegalStateException error = expectThrows(IllegalStateException.class, () -> drainAll(snapshot)); + assertThat(error.getMessage(), + containsString("Not all operations between from_seqno [" + fromSeqNo + "] and to_seqno [" + toSeqNo + "] found")); + }finally { + IOUtils.close(searcher); + } + toSeqNo = randomLongBetween(fromSeqNo, refreshedSeqNo); + searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL); + try (Translog.Snapshot snapshot = new LuceneChangesSnapshot( + searcher, mapperService, between(1, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE), fromSeqNo, toSeqNo, true)) { + searcher = null; + assertThat(snapshot, SnapshotMatchers.containsSeqNoRange(fromSeqNo, toSeqNo)); + } finally { + IOUtils.close(searcher); + } + } + // Get snapshot via engine will auto refresh + fromSeqNo = randomLongBetween(0, numOps - 1); + toSeqNo = randomLongBetween(fromSeqNo, numOps - 1); + try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", mapperService, fromSeqNo, toSeqNo, randomBoolean())) { + assertThat(snapshot, SnapshotMatchers.containsSeqNoRange(fromSeqNo, toSeqNo)); + } + } + + public void testDedupByPrimaryTerm() throws Exception { + Map latestOperations = new HashMap<>(); + List terms = Arrays.asList(between(1, 1000), between(1000, 2000)); + int totalOps = 0; + for (long term : terms) { + final List ops = generateSingleDocHistory(true, + randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL, VersionType.EXTERNAL_GTE), false, term, 2, 20, "1"); + primaryTerm.set(Math.max(primaryTerm.get(), term)); + engine.rollTranslogGeneration(); + for (Engine.Operation op : ops) { + // We need to simulate a rollback here as only ops after local checkpoint get into the engine + if (op.seqNo() <= engine.getLocalCheckpointTracker().getCheckpoint()) { + engine.getLocalCheckpointTracker().resetCheckpoint(randomLongBetween(-1, op.seqNo() - 1)); + engine.rollTranslogGeneration(); + } + if (op instanceof Engine.Index) { + engine.index((Engine.Index) op); + } else if (op instanceof Engine.Delete) { + engine.delete((Engine.Delete) op); + } + latestOperations.put(op.seqNo(), op.primaryTerm()); + if (rarely()) { + engine.refresh("test"); + } + 
if (rarely()) { + engine.flush(); + } + totalOps++; + } + } + long maxSeqNo = engine.getLocalCheckpointTracker().getMaxSeqNo(); + try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", mapperService, 0, maxSeqNo, false)) { + Translog.Operation op; + while ((op = snapshot.next()) != null) { + assertThat(op.toString(), op.primaryTerm(), equalTo(latestOperations.get(op.seqNo()))); + } + assertThat(snapshot.skippedOperations(), equalTo(totalOps - latestOperations.size())); + } + } + + public void testUpdateAndReadChangesConcurrently() throws Exception { + Follower[] followers = new Follower[between(1, 3)]; + CountDownLatch readyLatch = new CountDownLatch(followers.length + 1); + AtomicBoolean isDone = new AtomicBoolean(); + for (int i = 0; i < followers.length; i++) { + followers[i] = new Follower(engine, isDone, readyLatch); + followers[i].start(); + } + boolean onPrimary = randomBoolean(); + List operations = new ArrayList<>(); + int numOps = scaledRandomIntBetween(1, 1000); + for (int i = 0; i < numOps; i++) { + String id = Integer.toString(randomIntBetween(1, 10)); + ParsedDocument doc = createParsedDoc(id, randomAlphaOfLengthBetween(1, 5), randomBoolean()); + final Engine.Operation op; + if (onPrimary) { + if (randomBoolean()) { + op = new Engine.Index(newUid(doc), primaryTerm.get(), doc); + } else { + op = new Engine.Delete(doc.type(), doc.id(), newUid(doc.id()), primaryTerm.get()); + } + } else { + if (randomBoolean()) { + op = replicaIndexForDoc(doc, randomNonNegativeLong(), i, randomBoolean()); + } else { + op = replicaDeleteForDoc(doc.id(), randomNonNegativeLong(), i, randomNonNegativeLong()); + } + } + operations.add(op); + } + readyLatch.countDown(); + concurrentlyApplyOps(operations, engine); + assertThat(engine.getLocalCheckpointTracker().getCheckpoint(), equalTo(operations.size() - 1L)); + isDone.set(true); + for (Follower follower : followers) { + follower.join(); + } + } + + class Follower extends Thread { + private final Engine leader; + private final TranslogHandler translogHandler; + private final AtomicBoolean isDone; + private final CountDownLatch readLatch; + + Follower(Engine leader, AtomicBoolean isDone, CountDownLatch readLatch) { + this.leader = leader; + this.isDone = isDone; + this.readLatch = readLatch; + this.translogHandler = new TranslogHandler(xContentRegistry(), IndexSettingsModule.newIndexSettings(shardId.getIndexName(), + engine.engineConfig.getIndexSettings().getSettings())); + } + + void pullOperations(Engine follower) throws IOException { + long leaderCheckpoint = leader.getLocalCheckpoint(); + long followerCheckpoint = follower.getLocalCheckpoint(); + if (followerCheckpoint < leaderCheckpoint) { + long fromSeqNo = followerCheckpoint + 1; + long batchSize = randomLongBetween(0, 100); + long toSeqNo = Math.min(fromSeqNo + batchSize, leaderCheckpoint); + try (Translog.Snapshot snapshot = leader.newChangesSnapshot("test", mapperService, fromSeqNo, toSeqNo, true)) { + translogHandler.run(follower, snapshot); + } + } + } + + @Override + public void run() { + try (Store store = createStore(); + InternalEngine follower = createEngine(store, createTempDir())) { + readLatch.countDown(); + readLatch.await(); + while (isDone.get() == false || + follower.getLocalCheckpointTracker().getCheckpoint() < leader.getLocalCheckpoint()) { + pullOperations(follower); + } + assertConsistentHistoryBetweenTranslogAndLuceneIndex(follower, mapperService); + assertThat(getDocIds(follower, true), equalTo(getDocIds(leader, true))); + } catch (Exception ex) { + throw new 
AssertionError(ex); + } + } + } + + private List drainAll(Translog.Snapshot snapshot) throws IOException { + List operations = new ArrayList<>(); + Translog.Operation op; + while ((op = snapshot.next()) != null) { + final Translog.Operation newOp = op; + logger.error("Reading [{}]", op); + assert operations.stream().allMatch(o -> o.seqNo() < newOp.seqNo()) : "Operations [" + operations + "], op [" + op + "]"; + operations.add(newOp); + } + return operations; + } +} diff --git a/server/src/test/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicyTests.java new file mode 100644 index 0000000000000..c46b47b87d06e --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicyTests.java @@ -0,0 +1,161 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.engine; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.StoredField; +import org.apache.lucene.document.StringField; +import org.apache.lucene.index.CodecReader; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.MergePolicy; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.SegmentCommitInfo; +import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.index.StandardDirectoryReader; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.InfoStream; +import org.apache.lucene.util.NullInfoStream; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.Collections; +import java.util.Set; +import java.util.stream.Collectors; + +public class RecoverySourcePruneMergePolicyTests extends ESTestCase { + + public void testPruneAll() throws IOException { + try (Directory dir = newDirectory()) { + IndexWriterConfig iwc = newIndexWriterConfig(); + RecoverySourcePruneMergePolicy mp = new RecoverySourcePruneMergePolicy("extra_source", MatchNoDocsQuery::new, + newLogMergePolicy()); + iwc.setMergePolicy(mp); + try (IndexWriter writer = new IndexWriter(dir, iwc)) { + for (int i = 0; i < 20; i++) { + if (i > 0 && randomBoolean()) { + writer.flush(); + } + Document doc = new Document(); + doc.add(new StoredField("source", 
"hello world")); + doc.add(new StoredField("extra_source", "hello world")); + doc.add(new NumericDocValuesField("extra_source", 1)); + writer.addDocument(doc); + } + writer.forceMerge(1); + writer.commit(); + try (DirectoryReader reader = DirectoryReader.open(writer)) { + for (int i = 0; i < reader.maxDoc(); i++) { + Document document = reader.document(i); + assertEquals(1, document.getFields().size()); + assertEquals("source", document.getFields().get(0).name()); + } + assertEquals(1, reader.leaves().size()); + LeafReader leafReader = reader.leaves().get(0).reader(); + NumericDocValues extra_source = leafReader.getNumericDocValues("extra_source"); + if (extra_source != null) { + assertEquals(DocIdSetIterator.NO_MORE_DOCS, extra_source.nextDoc()); + } + if (leafReader instanceof CodecReader && reader instanceof StandardDirectoryReader) { + CodecReader codecReader = (CodecReader) leafReader; + StandardDirectoryReader sdr = (StandardDirectoryReader) reader; + SegmentInfos segmentInfos = sdr.getSegmentInfos(); + MergePolicy.MergeSpecification forcedMerges = mp.findForcedDeletesMerges(segmentInfos, + new MergePolicy.MergeContext() { + @Override + public int numDeletesToMerge(SegmentCommitInfo info) { + return info.info.maxDoc() - 1; + } + + @Override + public int numDeletedDocs(SegmentCommitInfo info) { + return info.info.maxDoc() - 1; + } + + @Override + public InfoStream getInfoStream() { + return new NullInfoStream(); + } + + @Override + public Set getMergingSegments() { + return Collections.emptySet(); + } + }); + // don't wrap if there is nothing to do + assertSame(codecReader, forcedMerges.merges.get(0).wrapForMerge(codecReader)); + } + } + } + } + } + + + public void testPruneSome() throws IOException { + try (Directory dir = newDirectory()) { + IndexWriterConfig iwc = newIndexWriterConfig(); + iwc.setMergePolicy(new RecoverySourcePruneMergePolicy("extra_source", + () -> new TermQuery(new Term("even", "true")), iwc.getMergePolicy())); + try (IndexWriter writer = new IndexWriter(dir, iwc)) { + for (int i = 0; i < 20; i++) { + if (i > 0 && randomBoolean()) { + writer.flush(); + } + Document doc = new Document(); + doc.add(new StringField("even", Boolean.toString(i % 2 == 0), Field.Store.YES)); + doc.add(new StoredField("source", "hello world")); + doc.add(new StoredField("extra_source", "hello world")); + doc.add(new NumericDocValuesField("extra_source", 1)); + writer.addDocument(doc); + } + writer.forceMerge(1); + writer.commit(); + try (DirectoryReader reader = DirectoryReader.open(writer)) { + assertEquals(1, reader.leaves().size()); + NumericDocValues extra_source = reader.leaves().get(0).reader().getNumericDocValues("extra_source"); + assertNotNull(extra_source); + for (int i = 0; i < reader.maxDoc(); i++) { + Document document = reader.document(i); + Set collect = document.getFields().stream().map(IndexableField::name).collect(Collectors.toSet()); + assertTrue(collect.contains("source")); + assertTrue(collect.contains("even")); + if (collect.size() == 3) { + assertTrue(collect.contains("extra_source")); + assertEquals("true", document.getField("even").stringValue()); + assertEquals(i, extra_source.nextDoc()); + } else { + assertEquals(2, document.getFields().size()); + } + } + assertEquals(DocIdSetIterator.NO_MORE_DOCS, extra_source.nextDoc()); + } + } + } + } +} diff --git a/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java new file mode 100644 index 
0000000000000..f359010038284 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java @@ -0,0 +1,75 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.engine; + +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.test.ESTestCase; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; + +import static org.hamcrest.Matchers.equalTo; + +public class SoftDeletesPolicyTests extends ESTestCase { + /** + * Makes sure we won't advance the retained seq# if the retention lock is held + */ + public void testSoftDeletesRetentionLock() { + long retainedOps = between(0, 10000); + AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + long safeCommitCheckpoint = globalCheckpoint.get(); + SoftDeletesPolicy policy = new SoftDeletesPolicy(globalCheckpoint::get, between(1, 10000), retainedOps); + long minRetainedSeqNo = policy.getMinRetainedSeqNo(); + List locks = new ArrayList<>(); + int iters = scaledRandomIntBetween(10, 1000); + for (int i = 0; i < iters; i++) { + if (randomBoolean()) { + locks.add(policy.acquireRetentionLock()); + } + // Advances the global checkpoint and the local checkpoint of a safe commit + globalCheckpoint.addAndGet(between(0, 1000)); + safeCommitCheckpoint = randomLongBetween(safeCommitCheckpoint, globalCheckpoint.get()); + policy.setLocalCheckpointOfSafeCommit(safeCommitCheckpoint); + if (rarely()) { + retainedOps = between(0, 10000); + policy.setRetentionOperations(retainedOps); + } + // Release some locks + List releasingLocks = randomSubsetOf(locks); + locks.removeAll(releasingLocks); + releasingLocks.forEach(Releasable::close); + + // We only expose the seqno to the merge policy if the retention lock is not held. 
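The comment above is the heart of this test: the policy may only advance its retained seq# at the moment the merge policy asks for the retention query, and only if no retention lock is held. A simplified model of that behaviour follows; it is an illustrative sketch, not the production SoftDeletesPolicy class:

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.LongSupplier;

public class RetentionPolicySketch {
    private final LongSupplier globalCheckpointSupplier;
    private long retentionOperations;
    private long localCheckpointOfSafeCommit = -1;
    private long minRetainedSeqNo = 0;
    private final AtomicInteger retentionLocks = new AtomicInteger();

    RetentionPolicySketch(LongSupplier globalCheckpointSupplier, long retentionOperations) {
        this.globalCheckpointSupplier = globalCheckpointSupplier;
        this.retentionOperations = retentionOperations;
    }

    AutoCloseable acquireRetentionLock() {          // held for the duration of a peer-recovery
        retentionLocks.incrementAndGet();
        return retentionLocks::decrementAndGet;
    }

    void setLocalCheckpointOfSafeCommit(long checkpoint) { this.localCheckpointOfSafeCommit = checkpoint; }

    void setRetentionOperations(long retentionOperations) { this.retentionOperations = retentionOperations; }

    // Called when the merge policy asks which documents it must keep. The retained seq# is frozen
    // while any lock is held; otherwise it advances (never backwards) to
    // min(localCheckpointOfSafeCommit, globalCheckpoint - retentionOperations) + 1.
    long getMinRetainedSeqNo() {
        if (retentionLocks.get() == 0) {
            long candidate = Math.min(localCheckpointOfSafeCommit,
                globalCheckpointSupplier.getAsLong() - retentionOperations) + 1;
            minRetainedSeqNo = Math.max(minRetainedSeqNo, candidate);
        }
        return minRetainedSeqNo;
    }
}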
+ policy.getRetentionQuery(); + if (locks.isEmpty()) { + long retainedSeqNo = Math.min(safeCommitCheckpoint, globalCheckpoint.get() - retainedOps) + 1; + minRetainedSeqNo = Math.max(minRetainedSeqNo, retainedSeqNo); + } + assertThat(policy.getMinRetainedSeqNo(), equalTo(minRetainedSeqNo)); + } + + locks.forEach(Releasable::close); + long retainedSeqNo = Math.min(safeCommitCheckpoint, globalCheckpoint.get() - retainedOps) + 1; + minRetainedSeqNo = Math.max(minRetainedSeqNo, retainedSeqNo); + assertThat(policy.getMinRetainedSeqNo(), equalTo(minRetainedSeqNo)); + } +} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java index 198a8296de059..9e7b41673ad9d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -338,15 +339,18 @@ DocumentMapper createDummyMapping(MapperService mapperService) throws Exception // creates an object mapper, which is about 100x harder than it should be.... ObjectMapper createObjectMapper(MapperService mapperService, String name) throws Exception { - ParseContext context = new ParseContext.InternalParseContext( - Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(), + IndexMetaData build = IndexMetaData.builder("") + .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(1).numberOfReplicas(0).build(); + IndexSettings settings = new IndexSettings(build, Settings.EMPTY); + ParseContext context = new ParseContext.InternalParseContext(settings, mapperService.documentMapperParser(), mapperService.documentMapper("type"), null, null); String[] nameParts = name.split("\\."); for (int i = 0; i < nameParts.length - 1; ++i) { context.path().add(nameParts[i]); } Mapper.Builder builder = new ObjectMapper.Builder(nameParts[nameParts.length - 1]).enabled(true); - Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); + Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings().getSettings(), context.path()); return (ObjectMapper)builder.build(builderContext); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java index c5c99926610f1..be9c00831803d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.BooleanFieldMapper.BooleanFieldType; import org.elasticsearch.index.mapper.DateFieldMapper.DateFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper.NumberFieldType; @@ -212,7 +213,10 @@ private String 
serialize(ToXContent mapper) throws Exception { } private Mapper parse(DocumentMapper mapper, DocumentMapperParser parser, XContentBuilder builder) throws Exception { - Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); + IndexMetaData build = IndexMetaData.builder("") + .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(1).numberOfReplicas(0).build(); + IndexSettings settings = new IndexSettings(build, Settings.EMPTY); SourceToParse source = SourceToParse.source("test", mapper.type(), "some_id", BytesReference.bytes(builder), builder.contentType()); try (XContentParser xContentParser = createParser(JsonXContent.jsonXContent, source.source())) { ParseContext.InternalParseContext ctx = new ParseContext.InternalParseContext(settings, parser, mapper, source, xContentParser); diff --git a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java index 453c0bd91c479..c6f6842cbe904 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.index.replication; +import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; @@ -42,6 +43,7 @@ import org.elasticsearch.index.engine.InternalEngineTests; import org.elasticsearch.index.engine.SegmentsStats; import org.elasticsearch.index.engine.VersionConflictEngineException; +import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; @@ -141,7 +143,9 @@ public void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaDa } public void testInheritMaxValidAutoIDTimestampOnRecovery() throws Exception { - try (ReplicationGroup shards = createGroup(0)) { + //TODO: Enables this test with soft-deletes once we have timestamp + Settings settings = Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false).build(); + try (ReplicationGroup shards = createGroup(0, settings)) { shards.startAll(); final IndexRequest indexRequest = new IndexRequest(index.getName(), "type").source("{}", XContentType.JSON); indexRequest.onRetry(); // force an update of the timestamp @@ -347,7 +351,13 @@ public void testDocumentFailureReplication() throws Exception { final AtomicBoolean throwAfterIndexedOneDoc = new AtomicBoolean(); // need one document to trigger delete in IW. @Override public long addDocument(Iterable doc) throws IOException { - if (throwAfterIndexedOneDoc.getAndSet(true)) { + boolean isTombstone = false; + for (IndexableField field : doc) { + if (SeqNoFieldMapper.TOMBSTONE_NAME.equals(field.name())) { + isTombstone = true; + } + } + if (isTombstone == false && throwAfterIndexedOneDoc.getAndSet(true)) { throw indexException; } else { return super.addDocument(doc); @@ -357,6 +367,10 @@ public long addDocument(Iterable doc) throws IOExcepti public long deleteDocuments(Term... 
terms) throws IOException { throw deleteException; } + @Override + public long softUpdateDocument(Term term, Iterable doc, Field...fields) throws IOException { + throw deleteException; // a delete uses softUpdateDocument API if soft-deletes enabled + } }, null, null, config); try (ReplicationGroup shards = new ReplicationGroup(buildIndexMetaData(0)) { @Override @@ -393,6 +407,9 @@ public long deleteDocuments(Term... terms) throws IOException { try (Translog.Snapshot snapshot = getTranslog(shard).newSnapshot()) { assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); } + try (Translog.Snapshot snapshot = shard.getHistoryOperations("test", 0)) { + assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); + } } // unlike previous failures, these two failures replicated directly from the replication channel. indexResp = shards.index(new IndexRequest(index.getName(), "type", "any").source("{}", XContentType.JSON)); @@ -407,6 +424,9 @@ public long deleteDocuments(Term... terms) throws IOException { try (Translog.Snapshot snapshot = getTranslog(shard).newSnapshot()) { assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); } + try (Translog.Snapshot snapshot = shard.getHistoryOperations("test", 0)) { + assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); + } } shards.assertAllEqual(1); } @@ -504,8 +524,9 @@ public void testSeqNoCollision() throws Exception { recoverReplica(replica3, replica2, true); try (Translog.Snapshot snapshot = getTranslog(replica3).newSnapshot()) { assertThat(snapshot.totalOperations(), equalTo(initDocs + 1)); - assertThat(snapshot.next(), equalTo(op2)); - assertThat("Remaining of snapshot should contain init operations", snapshot, containsOperationsInAnyOrder(initOperations)); + final List expectedOps = new ArrayList<>(initOperations); + expectedOps.add(op2); + assertThat(snapshot, containsOperationsInAnyOrder(expectedOps)); assertThat("Peer-recovery should not send overridden operations", snapshot.skippedOperations(), equalTo(0)); } // TODO: We should assert the content of shards in the ReplicationGroup. 
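A minimal sketch of the min-retained-seqno bookkeeping that the SoftDeletesPolicy test above asserts; the class and method names here are illustrative only, not production code. The retained seq# is recomputed only while no retention locks are held, and it never moves backwards:

    // Sketch of the retention arithmetic asserted by the policy test.
    final class RetentionMathSketch {
        private long minRetainedSeqNo = 0; // only ever advances

        long advance(long safeCommitCheckpoint, long globalCheckpoint, long retainedOps, boolean locksHeld) {
            if (locksHeld == false) {
                long candidate = Math.min(safeCommitCheckpoint, globalCheckpoint - retainedOps) + 1;
                minRetainedSeqNo = Math.max(minRetainedSeqNo, candidate);
            }
            return minRetainedSeqNo;
        }
    }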
diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index 37a52a76f8e5b..6ca1b6bfb68af 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -98,7 +98,8 @@ public void testIndexingDuringFileRecovery() throws Exception { } public void testRecoveryOfDisconnectedReplica() throws Exception { - try (ReplicationGroup shards = createGroup(1)) { + Settings settings = Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false).build(); + try (ReplicationGroup shards = createGroup(1, settings)) { shards.startAll(); int docs = shards.indexDocs(randomInt(50)); shards.flush(); @@ -267,6 +268,7 @@ public void testRecoveryAfterPrimaryPromotion() throws Exception { builder.settings(Settings.builder().put(newPrimary.indexSettings().getSettings()) .put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), "-1") .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), "-1") + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0) ); newPrimary.indexSettings().updateIndexMetaData(builder.build()); newPrimary.onSettingsChanged(); @@ -276,7 +278,12 @@ public void testRecoveryAfterPrimaryPromotion() throws Exception { shards.syncGlobalCheckpoint(); assertThat(newPrimary.getLastSyncedGlobalCheckpoint(), equalTo(newPrimary.seqNoStats().getMaxSeqNo())); }); - newPrimary.flush(new FlushRequest()); + newPrimary.flush(new FlushRequest().force(true)); + if (replica.indexSettings().isSoftDeleteEnabled()) { + // We need an extra flush to advance the min_retained_seqno on the new primary so ops-based won't happen. + // The min_retained_seqno only advances when a merge asks for the retention query. 
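The comment above depends on the retention query being consulted only when segments are merged. A hedged sketch of how soft deletes and a retention query are typically wired into a Lucene IndexWriterConfig follows; the field name, query, and wrapping merge policy are illustrative, not the exact production wiring:

    import org.apache.lucene.document.LongPoint;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.SoftDeletesRetentionMergePolicy;
    import org.apache.lucene.index.TieredMergePolicy;

    class SoftDeletesWiringSketch {
        // Soft-deleted documents that match the retention query survive merges;
        // the rest may be reclaimed, which is why retention only advances on merge.
        static IndexWriterConfig configWithSoftDeletes(long minRetainedSeqNo) {
            return new IndexWriterConfig()
                .setSoftDeletesField("__soft_deletes")
                .setMergePolicy(new SoftDeletesRetentionMergePolicy("__soft_deletes",
                    () -> LongPoint.newRangeQuery("_seq_no", minRetainedSeqNo, Long.MAX_VALUE),
                    new TieredMergePolicy()));
        }
    }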
+ newPrimary.flush(new FlushRequest().force(true)); + } uncommittedOpsOnPrimary = shards.indexDocs(randomIntBetween(0, 10)); totalDocs += uncommittedOpsOnPrimary; } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 83d930c3fdba8..6270edf8dde47 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -23,6 +23,7 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.TermQuery; @@ -31,6 +32,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.store.IOContext; +import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Constants; import org.elasticsearch.Assertions; import org.elasticsearch.Version; @@ -89,8 +91,13 @@ import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.SeqNoFieldMapper; +import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.mapper.VersionFieldMapper; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; @@ -165,6 +172,7 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThan; @@ -242,7 +250,8 @@ public void testFailShard() throws Exception { assertNotNull(shardPath); // fail shard shard.failShard("test shard fail", new CorruptIndexException("", "")); - closeShards(shard); + shard.close("do not assert history", false); + shard.store().close(); // check state file still exists ShardStateMetaData shardStateMetaData = load(logger, shardPath.getShardStatePath()); assertEquals(shardStateMetaData, getShardStateMetadata(shard)); @@ -2398,7 +2407,8 @@ public void testRecoverFromLocalShard() throws IOException { public void testDocStats() throws IOException, InterruptedException { IndexShard indexShard = null; try { - indexShard = newStartedShard(); + indexShard = newStartedShard(randomBoolean(), + Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0).build()); final long numDocs = randomIntBetween(2, 32); // at least two documents so we have docs to delete final long numDocsToDelete = randomLongBetween(1, numDocs); for (int i = 0; i < numDocs; i++) { @@ -2428,7 +2438,16 @@ public void testDocStats() throws IOException, InterruptedException { deleteDoc(indexShard, "_doc", id); indexDoc(indexShard, "_doc", id); } - + // Need to update and sync the global checkpoint as the soft-deletes retention MergePolicy depends on 
it. + if (indexShard.indexSettings.isSoftDeleteEnabled()) { + if (indexShard.routingEntry().primary()) { + indexShard.updateGlobalCheckpointForShard(indexShard.routingEntry().allocationId().getId(), + indexShard.getLocalCheckpoint()); + } else { + indexShard.updateGlobalCheckpointOnReplica(indexShard.getLocalCheckpoint(), "test"); + } + indexShard.sync(); + } // flush the buffered deletes final FlushRequest flushRequest = new FlushRequest(); flushRequest.force(false); @@ -2971,6 +2990,7 @@ public void testSegmentMemoryTrackedInBreaker() throws Exception { assertThat(breaker.getUsed(), greaterThan(preRefreshBytes)); indexDoc(primary, "_doc", "4", "{\"foo\": \"potato\"}"); + indexDoc(primary, "_doc", "5", "{\"foo\": \"potato\"}"); // Forces a refresh with the INTERNAL scope ((InternalEngine) primary.getEngine()).writeIndexingBuffer(); @@ -2982,6 +3002,13 @@ public void testSegmentMemoryTrackedInBreaker() throws Exception { // Deleting a doc causes its memory to be freed from the breaker deleteDoc(primary, "_doc", "0"); + // Here we are testing that a fully deleted segment should be dropped and its memory usage is freed. + // In order to instruct the merge policy not to keep a fully deleted segment, + // we need to flush and make that commit safe so that the SoftDeletesPolicy can drop everything. + if (IndexSettings.INDEX_SOFT_DELETES_SETTING.get(settings)) { + primary.sync(); + flushShard(primary); + } primary.refresh("force refresh"); ss = primary.segmentStats(randomBoolean()); @@ -3073,6 +3100,7 @@ public void testSegmentMemoryTrackedWithRandomSearchers() throws Exception { // Close remaining searchers IOUtils.close(searchers); + primary.refresh("test"); SegmentsStats ss = primary.segmentStats(randomBoolean()); CircuitBreaker breaker = primary.circuitBreakerService.getBreaker(CircuitBreaker.ACCOUNTING); @@ -3190,4 +3218,28 @@ public void testOnCloseStats() throws IOException { } + public void testSupplyTombstoneDoc() throws Exception { + IndexShard shard = newStartedShard(); + String id = randomRealisticUnicodeOfLengthBetween(1, 10); + ParsedDocument deleteTombstone = shard.getEngine().config().getTombstoneDocSupplier().newDeleteTombstoneDoc("doc", id); + assertThat(deleteTombstone.docs(), hasSize(1)); + ParseContext.Document deleteDoc = deleteTombstone.docs().get(0); + assertThat(deleteDoc.getFields().stream().map(IndexableField::name).collect(Collectors.toList()), + containsInAnyOrder(IdFieldMapper.NAME, VersionFieldMapper.NAME, + SeqNoFieldMapper.NAME, SeqNoFieldMapper.NAME, SeqNoFieldMapper.PRIMARY_TERM_NAME, SeqNoFieldMapper.TOMBSTONE_NAME)); + assertThat(deleteDoc.getField(IdFieldMapper.NAME).binaryValue(), equalTo(Uid.encodeId(id))); + assertThat(deleteDoc.getField(SeqNoFieldMapper.TOMBSTONE_NAME).numericValue().longValue(), equalTo(1L)); + + final String reason = randomUnicodeOfLength(200); + ParsedDocument noopTombstone = shard.getEngine().config().getTombstoneDocSupplier().newNoopTombstoneDoc(reason); + assertThat(noopTombstone.docs(), hasSize(1)); + ParseContext.Document noopDoc = noopTombstone.docs().get(0); + assertThat(noopDoc.getFields().stream().map(IndexableField::name).collect(Collectors.toList()), + containsInAnyOrder(VersionFieldMapper.NAME, SourceFieldMapper.NAME, SeqNoFieldMapper.TOMBSTONE_NAME, + SeqNoFieldMapper.NAME, SeqNoFieldMapper.NAME, SeqNoFieldMapper.PRIMARY_TERM_NAME)); + assertThat(noopDoc.getField(SeqNoFieldMapper.TOMBSTONE_NAME).numericValue().longValue(), equalTo(1L)); + assertThat(noopDoc.getField(SourceFieldMapper.NAME).binaryValue(), equalTo(new 
BytesRef(reason))); + + closeShards(shard); + } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java index ae2cc84e4870c..29b16ca28f4da 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java @@ -106,17 +106,22 @@ public void testSyncerSendsOffCorrectDocuments() throws Exception { .isPresent(), is(false)); } - - assertEquals(globalCheckPoint == numDocs - 1 ? 0 : numDocs, resyncTask.getTotalOperations()); if (syncNeeded && globalCheckPoint < numDocs - 1) { - long skippedOps = globalCheckPoint + 1; // everything up to global checkpoint included - assertEquals(skippedOps, resyncTask.getSkippedOperations()); - assertEquals(numDocs - skippedOps, resyncTask.getResyncedOperations()); + if (shard.indexSettings.isSoftDeleteEnabled()) { + assertThat(resyncTask.getSkippedOperations(), equalTo(0)); + assertThat(resyncTask.getResyncedOperations(), equalTo(resyncTask.getTotalOperations())); + assertThat(resyncTask.getTotalOperations(), equalTo(Math.toIntExact(numDocs - 1 - globalCheckPoint))); + } else { + int skippedOps = Math.toIntExact(globalCheckPoint + 1); // everything up to global checkpoint included + assertThat(resyncTask.getSkippedOperations(), equalTo(skippedOps)); + assertThat(resyncTask.getResyncedOperations(), equalTo(numDocs - skippedOps)); + assertThat(resyncTask.getTotalOperations(), equalTo(globalCheckPoint == numDocs - 1 ? 0 : numDocs)); + } } else { - assertEquals(0, resyncTask.getSkippedOperations()); - assertEquals(0, resyncTask.getResyncedOperations()); + assertThat(resyncTask.getSkippedOperations(), equalTo(0)); + assertThat(resyncTask.getResyncedOperations(), equalTo(0)); + assertThat(resyncTask.getTotalOperations(), equalTo(0)); } - closeShards(shard); } diff --git a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 774b272121a5c..b93f170174c3c 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -42,6 +42,7 @@ import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineConfig; +import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.fieldvisitor.SingleFieldsVisitor; import org.elasticsearch.index.mapper.IdFieldMapper; @@ -130,7 +131,8 @@ public void onFailedEngine(String reason, @Nullable Exception e) { indexSettings, null, store, newMergePolicy(), iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), eventListener, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), Collections.singletonList(listeners), Collections.emptyList(), null, - (e, s) -> 0, new NoneCircuitBreakerService(), () -> SequenceNumbers.NO_OPS_PERFORMED, () -> primaryTerm); + (e, s) -> 0, new NoneCircuitBreakerService(), () -> SequenceNumbers.NO_OPS_PERFORMED, () -> primaryTerm, + EngineTestCase.tombstoneDocSupplier()); engine = new InternalEngine(config); engine.recoverFromTranslog(Long.MAX_VALUE); 
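As the tombstone-document assertions above and the softUpdateDocument override earlier suggest, with soft deletes a delete is applied by soft-updating the live document with a tombstone instead of hard-deleting it. A hedged sketch, with illustrative method and field names:

    import java.io.IOException;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.NumericDocValuesField;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexableField;
    import org.apache.lucene.index.Term;

    class SoftDeleteSketch {
        // The tombstone document replaces the live document under the same uid,
        // and the extra doc-values field marks the old document as soft-deleted.
        static long softDelete(IndexWriter writer, Term uid, Iterable<? extends IndexableField> tombstoneDoc) throws IOException {
            Field softDeletesField = new NumericDocValuesField("__soft_deletes", 1);
            return writer.softUpdateDocument(uid, tombstoneDoc, softDeletesField);
        }
    }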
listeners.setCurrentRefreshLocationSupplier(engine::getTranslogLastWriteLocation); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 89a8813e3e07b..81afab4bb8f7e 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -67,6 +67,7 @@ import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; +import org.junit.After; import java.io.IOException; import java.util.ArrayList; @@ -110,6 +111,11 @@ protected Collection> nodePlugins() { RecoverySettingsChunkSizePlugin.class); } + @After + public void assertConsistentHistoryInLuceneIndex() throws Exception { + internalCluster().assertConsistentHistoryBetweenTranslogAndLuceneIndex(); + } + private void assertRecoveryStateWithoutStage(RecoveryState state, int shardId, RecoverySource recoverySource, boolean primary, String sourceNode, String targetNode) { assertThat(state.getShardId().getId(), equalTo(shardId)); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java index 4b1419375e6e5..b6f5a7b645169 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -25,6 +25,7 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.NoMergePolicy; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardTestCase; @@ -91,6 +92,7 @@ public void testGetStartingSeqNo() throws Exception { replica.close("test", false); final List commits = DirectoryReader.listCommits(replica.store().directory()); IndexWriterConfig iwc = new IndexWriterConfig(null) + .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) .setCommitOnClose(false) .setMergePolicy(NoMergePolicy.INSTANCE) .setOpenMode(IndexWriterConfig.OpenMode.APPEND); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index 8e0aa6e85b035..0f322e3b8ea6d 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -411,12 +411,6 @@ public void testThrowExceptionOnPrimaryRelocatedBeforePhase1Started() throws IOE recoverySettings.getChunkSize().bytesAsInt(), Settings.EMPTY) { - - @Override - boolean isTranslogReadyForSequenceNumberBasedRecovery() throws IOException { - return randomBoolean(); - } - @Override public void phase1(final IndexCommit snapshot, final Supplier translogOps) { phase1Called.set(true); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java index 365529d471cfe..9f7e50c03e380 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ 
b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.mapper.SourceToParse; @@ -63,13 +64,13 @@ public void testTranslogHistoryTransferred() throws Exception { int docs = shards.indexDocs(10); getTranslog(shards.getPrimary()).rollGeneration(); shards.flush(); - if (randomBoolean()) { - docs += shards.indexDocs(10); - } + int moreDocs = shards.indexDocs(randomInt(10)); shards.addReplica(); shards.startAll(); final IndexShard replica = shards.getReplicas().get(0); - assertThat(replica.estimateTranslogOperationsFromMinSeq(0), equalTo(docs)); + boolean softDeletesEnabled = replica.indexSettings().isSoftDeleteEnabled(); + assertThat(getTranslog(replica).totalOperations(), equalTo(softDeletesEnabled ? moreDocs : docs + moreDocs)); + shards.assertAllEqual(docs + moreDocs); } } @@ -101,12 +102,12 @@ public void testRetentionPolicyChangeDuringRecovery() throws Exception { // rolling/flushing is async assertBusy(() -> { assertThat(replica.getLastSyncedGlobalCheckpoint(), equalTo(19L)); - assertThat(replica.estimateTranslogOperationsFromMinSeq(0), equalTo(0)); + assertThat(getTranslog(replica).totalOperations(), equalTo(0)); }); } } - public void testRecoveryWithOutOfOrderDelete() throws Exception { + public void testRecoveryWithOutOfOrderDeleteWithTranslog() throws Exception { /* * The flow of this test: * - delete #1 @@ -118,7 +119,8 @@ public void testRecoveryWithOutOfOrderDelete() throws Exception { * - index #5 * - If flush and the translog retention disabled, delete #1 will be removed while index #0 is still retained and replayed. */ - try (ReplicationGroup shards = createGroup(1)) { + Settings settings = Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false).build(); + try (ReplicationGroup shards = createGroup(1, settings)) { shards.startAll(); // create out of order delete and index op on replica final IndexShard orgReplica = shards.getReplicas().get(0); @@ -140,7 +142,7 @@ public void testRecoveryWithOutOfOrderDelete() throws Exception { SourceToParse.source(indexName, "type", "id-2", new BytesArray("{}"), XContentType.JSON)); orgReplica.updateGlobalCheckpointOnReplica(3L, "test"); // index #5 -> force NoOp #4. 
- orgReplica.applyIndexOperationOnReplica(5, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, + orgReplica.applyIndexOperationOnReplica(5, 1, VersionType.EXTERNAL,IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, SourceToParse.source(indexName, "type", "id-5", new BytesArray("{}"), XContentType.JSON)); final int translogOps; @@ -170,7 +172,63 @@ public void testRecoveryWithOutOfOrderDelete() throws Exception { shards.recoverReplica(newReplica); shards.assertAllEqual(3); - assertThat(newReplica.estimateTranslogOperationsFromMinSeq(0), equalTo(translogOps)); + assertThat(getTranslog(newReplica).totalOperations(), equalTo(translogOps)); + } + } + + public void testRecoveryWithOutOfOrderDeleteWithSoftDeletes() throws Exception { + Settings settings = Settings.builder() + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 10) + // If soft-deletes is enabled, delete#1 will be reclaimed because its segment (segment_1) is fully deleted + // index#0 will be retained if merge is disabled; otherwise it will be reclaimed because gcp=3 and retained_ops=0 + .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false).build(); + try (ReplicationGroup shards = createGroup(1, settings)) { + shards.startAll(); + // create out of order delete and index op on replica + final IndexShard orgReplica = shards.getReplicas().get(0); + final String indexName = orgReplica.shardId().getIndexName(); + + // delete #1 + orgReplica.applyDeleteOperationOnReplica(1, 2, "type", "id", VersionType.EXTERNAL); + orgReplica.flush(new FlushRequest().force(true)); // isolate delete#1 in its own translog generation and lucene segment + // index #0 + orgReplica.applyIndexOperationOnReplica(0, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, + SourceToParse.source(indexName, "type", "id", new BytesArray("{}"), XContentType.JSON)); + // index #3 + orgReplica.applyIndexOperationOnReplica(3, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, + SourceToParse.source(indexName, "type", "id-3", new BytesArray("{}"), XContentType.JSON)); + // Flushing a new commit with local checkpoint=1 allows to delete the translog gen #1. + orgReplica.flush(new FlushRequest().force(true).waitIfOngoing(true)); + // index #2 + orgReplica.applyIndexOperationOnReplica(2, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, + SourceToParse.source(indexName, "type", "id-2", new BytesArray("{}"), XContentType.JSON)); + orgReplica.updateGlobalCheckpointOnReplica(3L, "test"); + // index #5 -> force NoOp #4. 
+ orgReplica.applyIndexOperationOnReplica(5, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, + SourceToParse.source(indexName, "type", "id-5", new BytesArray("{}"), XContentType.JSON)); + + if (randomBoolean()) { + if (randomBoolean()) { + logger.info("--> flushing shard (translog/soft-deletes will be trimmed)"); + IndexMetaData.Builder builder = IndexMetaData.builder(orgReplica.indexSettings().getIndexMetaData()); + builder.settings(Settings.builder().put(orgReplica.indexSettings().getSettings()) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0)); + orgReplica.indexSettings().updateIndexMetaData(builder.build()); + orgReplica.onSettingsChanged(); + } + flushShard(orgReplica); + } + + final IndexShard orgPrimary = shards.getPrimary(); + shards.promoteReplicaToPrimary(orgReplica).get(); // wait for primary/replica sync to make sure seq# gap is closed. + + IndexShard newReplica = shards.addReplicaWithExistingPath(orgPrimary.shardPath(), orgPrimary.routingEntry().currentNodeId()); + shards.recoverReplica(newReplica); + shards.assertAllEqual(3); + try (Translog.Snapshot snapshot = newReplica.getHistoryOperations("test", 0)) { + assertThat(snapshot, SnapshotMatchers.size(6)); + } } } @@ -222,7 +280,8 @@ public void testDifferentHistoryUUIDDisablesOPsRecovery() throws Exception { shards.recoverReplica(newReplica); // file based recovery should be made assertThat(newReplica.recoveryState().getIndex().fileDetails(), not(empty())); - assertThat(newReplica.estimateTranslogOperationsFromMinSeq(0), equalTo(numDocs)); + boolean softDeletesEnabled = replica.indexSettings().isSoftDeleteEnabled(); + assertThat(getTranslog(newReplica).totalOperations(), equalTo(softDeletesEnabled ? nonFlushedDocs : numDocs)); // history uuid was restored assertThat(newReplica.getHistoryUUID(), equalTo(historyUUID)); @@ -326,7 +385,8 @@ public void testShouldFlushAfterPeerRecovery() throws Exception { shards.recoverReplica(replica); // Make sure the flushing will eventually be completed (eg. `shouldPeriodicallyFlush` is false) assertBusy(() -> assertThat(getEngine(replica).shouldPeriodicallyFlush(), equalTo(false))); - assertThat(replica.estimateTranslogOperationsFromMinSeq(0), equalTo(numDocs)); + boolean softDeletesEnabled = replica.indexSettings().isSoftDeleteEnabled(); + assertThat(getTranslog(replica).totalOperations(), equalTo(softDeletesEnabled ? 
0 : numDocs)); shards.assertAllEqual(numDocs); } } diff --git a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java index efabf34216258..409db4231292a 100644 --- a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -43,6 +43,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexModule; +import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.index.MergeSchedulerConfig; @@ -50,6 +51,7 @@ import org.elasticsearch.index.cache.query.QueryCacheStats; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesQueryCache; import org.elasticsearch.indices.IndicesRequestCache; @@ -69,6 +71,7 @@ import java.util.EnumSet; import java.util.List; import java.util.Random; +import java.util.Set; import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; @@ -115,6 +118,7 @@ public Settings indexSettings() { return Settings.builder().put(super.indexSettings()) .put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), true) .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), true) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0) .build(); } @@ -1029,10 +1033,15 @@ private void assertCumulativeQueryCacheStats(IndicesStatsResponse response) { @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32506") public void testFilterCacheStats() throws Exception { - assertAcked(prepareCreate("index").setSettings(Settings.builder().put(indexSettings()).put("number_of_replicas", 0).build()).get()); - indexRandom(true, + Settings settings = Settings.builder().put(indexSettings()).put("number_of_replicas", 0).build(); + assertAcked(prepareCreate("index").setSettings(settings).get()); + indexRandom(false, true, client().prepareIndex("index", "type", "1").setSource("foo", "bar"), client().prepareIndex("index", "type", "2").setSource("foo", "baz")); + if (IndexSettings.INDEX_SOFT_DELETES_SETTING.get(settings)) { + persistGlobalCheckpoint("index"); // Need to persist the global checkpoint for the soft-deletes retention MP. + } + refresh(); ensureGreen(); IndicesStatsResponse response = client().admin().indices().prepareStats("index").setQueryCache(true).get(); @@ -1063,6 +1072,13 @@ public void testFilterCacheStats() throws Exception { assertEquals(DocWriteResponse.Result.DELETED, client().prepareDelete("index", "type", "1").get().getResult()); assertEquals(DocWriteResponse.Result.DELETED, client().prepareDelete("index", "type", "2").get().getResult()); + // Here we are testing that a fully deleted segment should be dropped and its cached is evicted. + // In order to instruct the merge policy not to keep a fully deleted segment, + // we need to flush and make that commit safe so that the SoftDeletesPolicy can drop everything. 
+ if (IndexSettings.INDEX_SOFT_DELETES_SETTING.get(settings)) { + persistGlobalCheckpoint("index"); + flush("index"); + } refresh(); response = client().admin().indices().prepareStats("index").setQueryCache(true).get(); assertCumulativeQueryCacheStats(response); @@ -1196,4 +1212,21 @@ public void testConcurrentIndexingAndStatsRequests() throws BrokenBarrierExcepti assertThat(executionFailures.get(), emptyCollectionOf(Exception.class)); } + + /** + * Persist the global checkpoint on all shards of the given index into disk. + * This makes sure that the persisted global checkpoint on those shards will equal to the in-memory value. + */ + private void persistGlobalCheckpoint(String index) throws Exception { + final Set nodes = internalCluster().nodesInclude(index); + for (String node : nodes) { + final IndicesService indexServices = internalCluster().getInstance(IndicesService.class, node); + for (IndexService indexService : indexServices) { + for (IndexShard indexShard : indexService) { + indexShard.sync(); + assertThat(indexShard.getLastSyncedGlobalCheckpoint(), equalTo(indexShard.getGlobalCheckpoint())); + } + } + } + } } diff --git a/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 23c56688e00b4..c25cad61e0740 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -27,6 +27,7 @@ import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.snapshots.mockstore.MockRepository; import org.elasticsearch.test.ESIntegTestCase; +import org.junit.After; import java.io.IOException; import java.nio.file.FileVisitResult; @@ -58,6 +59,11 @@ protected Collection> nodePlugins() { return Arrays.asList(MockRepository.Plugin.class); } + @After + public void assertConsistentHistoryInLuceneIndex() throws Exception { + internalCluster().assertConsistentHistoryBetweenTranslogAndLuceneIndex(); + } + public static long getFailureCount(String repository) { long failureCount = 0; for (RepositoriesService repositoriesService : diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index e1cf2e47da26f..8d71c04e46843 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -122,6 +122,7 @@ import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.IndexSettings.INDEX_REFRESH_INTERVAL_SETTING; +import static org.elasticsearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; import static org.elasticsearch.index.shard.IndexShardTests.getEngineFromShard; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -2048,7 +2049,9 @@ public void testSnapshotMoreThanOnce() throws ExecutionException, InterruptedExc .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); // only one shard - assertAcked(prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1))); 
+ final Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).build(); + assertAcked(prepareCreate("test").setSettings(indexSettings)); ensureGreen(); logger.info("--> indexing"); @@ -2094,7 +2097,13 @@ public void testSnapshotMoreThanOnce() throws ExecutionException, InterruptedExc SnapshotStatus snapshotStatus = client.admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-2").get().getSnapshots().get(0); List shards = snapshotStatus.getShards(); for (SnapshotIndexShardStatus status : shards) { - assertThat(status.getStats().getProcessedFileCount(), equalTo(2)); // we flush before the snapshot such that we have to process the segments_N files plus the .del file + // we flush before the snapshot such that we have to process the segments_N files plus the .del file + if (INDEX_SOFT_DELETES_SETTING.get(indexSettings)) { + // soft-delete generates DV files. + assertThat(status.getStats().getProcessedFileCount(), greaterThan(2)); + } else { + assertThat(status.getStats().getProcessedFileCount(), equalTo(2)); + } } } } diff --git a/server/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java b/server/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java index 23b849b0742d3..5c7cf94e4c788 100644 --- a/server/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java +++ b/server/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.VersionType; @@ -783,4 +784,26 @@ public void testGCDeletesZero() throws Exception { .getVersion(), equalTo(-1L)); } + + public void testSpecialVersioning() { + internalCluster().ensureAtLeastNumDataNodes(2); + createIndex("test", Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).build()); + IndexResponse doc1 = client().prepareIndex("test", "type", "1").setSource("field", "value1") + .setVersion(0).setVersionType(VersionType.EXTERNAL).execute().actionGet(); + assertThat(doc1.getVersion(), equalTo(0L)); + IndexResponse doc2 = client().prepareIndex("test", "type", "1").setSource("field", "value2") + .setVersion(Versions.MATCH_ANY).setVersionType(VersionType.INTERNAL).execute().actionGet(); + assertThat(doc2.getVersion(), equalTo(1L)); + client().prepareDelete("test", "type", "1").get(); //v2 + IndexResponse doc3 = client().prepareIndex("test", "type", "1").setSource("field", "value3") + .setVersion(Versions.MATCH_DELETED).setVersionType(VersionType.INTERNAL).execute().actionGet(); + assertThat(doc3.getVersion(), equalTo(3L)); + IndexResponse doc4 = client().prepareIndex("test", "type", "1").setSource("field", "value4") + .setVersion(4L).setVersionType(VersionType.EXTERNAL_GTE).execute().actionGet(); + assertThat(doc4.getVersion(), equalTo(4L)); + // Make sure that these versions are replicated correctly + client().admin().indices().prepareUpdateSettings("test") + .setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)).get(); + ensureGreen("test"); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 
9682893b61aef..0ace4a4cfa441 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -19,14 +19,18 @@ package org.elasticsearch.index.engine; +import org.apache.logging.log4j.Logger; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.codecs.Codec; +import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.StoredField; import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.LiveIndexWriterConfig; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.Term; @@ -34,33 +38,41 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.search.Sort; +import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TotalHitCountCollector; import org.apache.lucene.store.Directory; +import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.MapperTestUtils; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.mapper.VersionFieldMapper; import org.elasticsearch.index.seqno.LocalCheckpointTracker; import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.index.seqno.SequenceNumbers; @@ -81,17 +93,30 @@ import java.io.IOException; import java.nio.charset.Charset; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.function.BiFunction; +import java.util.function.Function; 
import java.util.function.LongSupplier; import java.util.function.ToLongBiFunction; +import java.util.stream.Collectors; import static java.util.Collections.emptyList; +import static java.util.Collections.shuffle; +import static org.elasticsearch.index.engine.Engine.Operation.Origin.PRIMARY; +import static org.elasticsearch.index.engine.Engine.Operation.Origin.REPLICA; import static org.elasticsearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; public abstract class EngineTestCase extends ESTestCase { @@ -129,6 +154,20 @@ protected static void assertVisibleCount(Engine engine, int numDocs, boolean ref } } + protected Settings indexSettings() { + // TODO randomize more settings + return Settings.builder() + .put(IndexSettings.INDEX_GC_DELETES_SETTING.getKey(), "1h") // make sure this doesn't kick in on us + .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), codecName) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.getKey(), + between(10, 10 * IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.get(Settings.EMPTY))) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), + randomBoolean() ? IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.get(Settings.EMPTY) : between(0, 1000)) + .build(); + } + @Override @Before public void setUp() throws Exception { @@ -143,13 +182,7 @@ public void setUp() throws Exception { } else { codecName = "default"; } - defaultSettings = IndexSettingsModule.newIndexSettings("test", Settings.builder() - .put(IndexSettings.INDEX_GC_DELETES_SETTING.getKey(), "1h") // make sure this doesn't kick in on us - .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), codecName) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.getKey(), - between(10, 10 * IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.get(Settings.EMPTY))) - .build()); // TODO randomize more settings + defaultSettings = IndexSettingsModule.newIndexSettings("test", indexSettings()); threadPool = new TestThreadPool(getClass().getName()); store = createStore(); storeReplica = createStore(); @@ -181,7 +214,7 @@ public EngineConfig copy(EngineConfig config, LongSupplier globalCheckpointSuppl new CodecService(null, logger), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(), config.getTranslogConfig(), config.getFlushMergesAfter(), config.getExternalRefreshListener(), Collections.emptyList(), config.getIndexSort(), config.getTranslogRecoveryRunner(), - config.getCircuitBreakerService(), globalCheckpointSupplier, config.getPrimaryTermSupplier()); + config.getCircuitBreakerService(), globalCheckpointSupplier, config.getPrimaryTermSupplier(), tombstoneDocSupplier()); } public EngineConfig copy(EngineConfig config, Analyzer analyzer) { @@ -190,7 +223,18 @@ public EngineConfig copy(EngineConfig config, Analyzer analyzer) { new CodecService(null, logger), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(), config.getTranslogConfig(), config.getFlushMergesAfter(), config.getExternalRefreshListener(), Collections.emptyList(), config.getIndexSort(), config.getTranslogRecoveryRunner(), - config.getCircuitBreakerService(), config.getGlobalCheckpointSupplier(), config.getPrimaryTermSupplier()); + 
config.getCircuitBreakerService(), config.getGlobalCheckpointSupplier(), config.getPrimaryTermSupplier(), + config.getTombstoneDocSupplier()); + } + + public EngineConfig copy(EngineConfig config, MergePolicy mergePolicy) { + return new EngineConfig(config.getShardId(), config.getAllocationId(), config.getThreadPool(), config.getIndexSettings(), + config.getWarmer(), config.getStore(), mergePolicy, config.getAnalyzer(), config.getSimilarity(), + new CodecService(null, logger), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(), + config.getTranslogConfig(), config.getFlushMergesAfter(), + config.getExternalRefreshListener(), Collections.emptyList(), config.getIndexSort(), config.getTranslogRecoveryRunner(), + config.getCircuitBreakerService(), config.getGlobalCheckpointSupplier(), config.getPrimaryTermSupplier(), + config.getTombstoneDocSupplier()); } @Override @@ -199,9 +243,11 @@ public void tearDown() throws Exception { super.tearDown(); if (engine != null && engine.isClosed.get() == false) { engine.getTranslog().getDeletionPolicy().assertNoOpenTranslogRefs(); + assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, createMapperService("test")); } if (replicaEngine != null && replicaEngine.isClosed.get() == false) { replicaEngine.getTranslog().getDeletionPolicy().assertNoOpenTranslogRefs(); + assertConsistentHistoryBetweenTranslogAndLuceneIndex(replicaEngine, createMapperService("test")); } IOUtils.close( replicaEngine, storeReplica, @@ -229,8 +275,18 @@ public static ParsedDocument createParsedDoc(String id, String routing) { return testParsedDocument(id, routing, testDocumentWithTextField(), new BytesArray("{ \"value\" : \"test\" }"), null); } + public static ParsedDocument createParsedDoc(String id, String routing, boolean recoverySource) { + return testParsedDocument(id, routing, testDocumentWithTextField(), new BytesArray("{ \"value\" : \"test\" }"), null, + recoverySource); + } + protected static ParsedDocument testParsedDocument( String id, String routing, ParseContext.Document document, BytesReference source, Mapping mappingUpdate) { + return testParsedDocument(id, routing, document, source, mappingUpdate, false); + } + protected static ParsedDocument testParsedDocument( + String id, String routing, ParseContext.Document document, BytesReference source, Mapping mappingUpdate, + boolean recoverySource) { Field uidField = new Field("_id", Uid.encodeId(id), IdFieldMapper.Defaults.FIELD_TYPE); Field versionField = new NumericDocValuesField("_version", 0); SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID(); @@ -240,11 +296,57 @@ protected static ParsedDocument testParsedDocument( document.add(seqID.seqNoDocValue); document.add(seqID.primaryTerm); BytesRef ref = source.toBytesRef(); - document.add(new StoredField(SourceFieldMapper.NAME, ref.bytes, ref.offset, ref.length)); + if (recoverySource) { + document.add(new StoredField(SourceFieldMapper.RECOVERY_SOURCE_NAME, ref.bytes, ref.offset, ref.length)); + document.add(new NumericDocValuesField(SourceFieldMapper.RECOVERY_SOURCE_NAME, 1)); + } else { + document.add(new StoredField(SourceFieldMapper.NAME, ref.bytes, ref.offset, ref.length)); + } return new ParsedDocument(versionField, seqID, id, "test", routing, Arrays.asList(document), source, XContentType.JSON, mappingUpdate); } + /** + * Creates a tombstone document that only includes uid, seq#, term and version fields. 
+ */ + public static EngineConfig.TombstoneDocSupplier tombstoneDocSupplier(){ + return new EngineConfig.TombstoneDocSupplier() { + @Override + public ParsedDocument newDeleteTombstoneDoc(String type, String id) { + final ParseContext.Document doc = new ParseContext.Document(); + Field uidField = new Field(IdFieldMapper.NAME, Uid.encodeId(id), IdFieldMapper.Defaults.FIELD_TYPE); + doc.add(uidField); + Field versionField = new NumericDocValuesField(VersionFieldMapper.NAME, 0); + doc.add(versionField); + SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID(); + doc.add(seqID.seqNo); + doc.add(seqID.seqNoDocValue); + doc.add(seqID.primaryTerm); + seqID.tombstoneField.setLongValue(1); + doc.add(seqID.tombstoneField); + return new ParsedDocument(versionField, seqID, id, type, null, + Collections.singletonList(doc), new BytesArray("{}"), XContentType.JSON, null); + } + + @Override + public ParsedDocument newNoopTombstoneDoc(String reason) { + final ParseContext.Document doc = new ParseContext.Document(); + SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID(); + doc.add(seqID.seqNo); + doc.add(seqID.seqNoDocValue); + doc.add(seqID.primaryTerm); + seqID.tombstoneField.setLongValue(1); + doc.add(seqID.tombstoneField); + Field versionField = new NumericDocValuesField(VersionFieldMapper.NAME, 0); + doc.add(versionField); + BytesRef byteRef = new BytesRef(reason); + doc.add(new StoredField(SourceFieldMapper.NAME, byteRef.bytes, byteRef.offset, byteRef.length)); + return new ParsedDocument(versionField, seqID, null, null, null, + Collections.singletonList(doc), null, XContentType.JSON, null); + } + }; + } + protected Store createStore() throws IOException { return createStore(newDirectory()); } @@ -462,7 +564,7 @@ public void onFailedEngine(String reason, @Nullable Exception e) { new NoneCircuitBreakerService(), globalCheckpointSupplier == null ? 
new ReplicationTracker(shardId, allocationId.getId(), indexSettings, SequenceNumbers.NO_OPS_PERFORMED, update -> {}) : - globalCheckpointSupplier, primaryTerm::get); + globalCheckpointSupplier, primaryTerm::get, tombstoneDocSupplier()); return config; } @@ -475,7 +577,7 @@ protected static BytesArray bytesArray(String string) { return new BytesArray(string.getBytes(Charset.defaultCharset())); } - protected Term newUid(String id) { + protected static Term newUid(String id) { return new Term("_id", Uid.encodeId(id)); } @@ -499,9 +601,293 @@ protected Engine.Index replicaIndexForDoc(ParsedDocument doc, long version, long } protected Engine.Delete replicaDeleteForDoc(String id, long version, long seqNo, long startTime) { - return new Engine.Delete("test", id, newUid(id), seqNo, 1, version, VersionType.EXTERNAL, + return new Engine.Delete("test", id, newUid(id), seqNo, primaryTerm.get(), version, VersionType.EXTERNAL, Engine.Operation.Origin.REPLICA, startTime); } + protected static void assertVisibleCount(InternalEngine engine, int numDocs) throws IOException { + assertVisibleCount(engine, numDocs, true); + } + + protected static void assertVisibleCount(InternalEngine engine, int numDocs, boolean refresh) throws IOException { + if (refresh) { + engine.refresh("test"); + } + try (Engine.Searcher searcher = engine.acquireSearcher("test")) { + final TotalHitCountCollector collector = new TotalHitCountCollector(); + searcher.searcher().search(new MatchAllDocsQuery(), collector); + assertThat(collector.getTotalHits(), equalTo(numDocs)); + } + } + + public static List generateSingleDocHistory( + final boolean forReplica, + final VersionType versionType, + final boolean partialOldPrimary, + final long primaryTerm, + final int minOpCount, + final int maxOpCount, + final String docId) { + final int numOfOps = randomIntBetween(minOpCount, maxOpCount); + final List ops = new ArrayList<>(); + final Term id = newUid(docId); + final int startWithSeqNo; + if (partialOldPrimary) { + startWithSeqNo = randomBoolean() ? numOfOps - 1 : randomIntBetween(0, numOfOps - 1); + } else { + startWithSeqNo = 0; + } + final String valuePrefix = (forReplica ? "r_" : "p_") + docId + "_"; + final boolean incrementTermWhenIntroducingSeqNo = randomBoolean(); + for (int i = 0; i < numOfOps; i++) { + final Engine.Operation op; + final long version; + switch (versionType) { + case INTERNAL: + version = forReplica ? i : Versions.MATCH_ANY; + break; + case EXTERNAL: + version = i; + break; + case EXTERNAL_GTE: + version = randomBoolean() ? Math.max(i - 1, 0) : i; + break; + case FORCE: + version = randomNonNegativeLong(); + break; + default: + throw new UnsupportedOperationException("unknown version type: " + versionType); + } + if (randomBoolean()) { + op = new Engine.Index(id, testParsedDocument(docId, null, testDocumentWithTextField(valuePrefix + i), B_1, null), + forReplica && i >= startWithSeqNo ? i * 2 : SequenceNumbers.UNASSIGNED_SEQ_NO, + forReplica && i >= startWithSeqNo && incrementTermWhenIntroducingSeqNo ? primaryTerm + 1 : primaryTerm, + version, + forReplica ? versionType.versionTypeForReplicationAndRecovery() : versionType, + forReplica ? REPLICA : PRIMARY, + System.currentTimeMillis(), -1, false + ); + } else { + op = new Engine.Delete("test", docId, id, + forReplica && i >= startWithSeqNo ? i * 2 : SequenceNumbers.UNASSIGNED_SEQ_NO, + forReplica && i >= startWithSeqNo && incrementTermWhenIntroducingSeqNo ? primaryTerm + 1 : primaryTerm, + version, + forReplica ? 
versionType.versionTypeForReplicationAndRecovery() : versionType, + forReplica ? REPLICA : PRIMARY, + System.currentTimeMillis()); + } + ops.add(op); + } + return ops; + } + + public static void assertOpsOnReplica( + final List ops, + final InternalEngine replicaEngine, + boolean shuffleOps, + final Logger logger) throws IOException { + final Engine.Operation lastOp = ops.get(ops.size() - 1); + final String lastFieldValue; + if (lastOp instanceof Engine.Index) { + Engine.Index index = (Engine.Index) lastOp; + lastFieldValue = index.docs().get(0).get("value"); + } else { + // delete + lastFieldValue = null; + } + if (shuffleOps) { + int firstOpWithSeqNo = 0; + while (firstOpWithSeqNo < ops.size() && ops.get(firstOpWithSeqNo).seqNo() < 0) { + firstOpWithSeqNo++; + } + // shuffle ops but make sure legacy ops are first + shuffle(ops.subList(0, firstOpWithSeqNo), random()); + shuffle(ops.subList(firstOpWithSeqNo, ops.size()), random()); + } + boolean firstOp = true; + for (Engine.Operation op : ops) { + logger.info("performing [{}], v [{}], seq# [{}], term [{}]", + op.operationType().name().charAt(0), op.version(), op.seqNo(), op.primaryTerm()); + if (op instanceof Engine.Index) { + Engine.IndexResult result = replicaEngine.index((Engine.Index) op); + // replicas don't really care to about creation status of documents + // this allows to ignore the case where a document was found in the live version maps in + // a delete state and return false for the created flag in favor of code simplicity + // as deleted or not. This check is just signal regression so a decision can be made if it's + // intentional + assertThat(result.isCreated(), equalTo(firstOp)); + assertThat(result.getVersion(), equalTo(op.version())); + assertThat(result.getResultType(), equalTo(Engine.Result.Type.SUCCESS)); + + } else { + Engine.DeleteResult result = replicaEngine.delete((Engine.Delete) op); + // Replicas don't really care to about found status of documents + // this allows to ignore the case where a document was found in the live version maps in + // a delete state and return true for the found flag in favor of code simplicity + // his check is just signal regression so a decision can be made if it's + // intentional + assertThat(result.isFound(), equalTo(firstOp == false)); + assertThat(result.getVersion(), equalTo(op.version())); + assertThat(result.getResultType(), equalTo(Engine.Result.Type.SUCCESS)); + } + if (randomBoolean()) { + replicaEngine.refresh("test"); + } + if (randomBoolean()) { + replicaEngine.flush(); + replicaEngine.refresh("test"); + } + firstOp = false; + } + + assertVisibleCount(replicaEngine, lastFieldValue == null ? 
0 : 1); + if (lastFieldValue != null) { + try (Engine.Searcher searcher = replicaEngine.acquireSearcher("test")) { + final TotalHitCountCollector collector = new TotalHitCountCollector(); + searcher.searcher().search(new TermQuery(new Term("value", lastFieldValue)), collector); + assertThat(collector.getTotalHits(), equalTo(1)); + } + } + } + + protected void concurrentlyApplyOps(List ops, InternalEngine engine) throws InterruptedException { + Thread[] thread = new Thread[randomIntBetween(3, 5)]; + CountDownLatch startGun = new CountDownLatch(thread.length); + AtomicInteger offset = new AtomicInteger(-1); + for (int i = 0; i < thread.length; i++) { + thread[i] = new Thread(() -> { + startGun.countDown(); + try { + startGun.await(); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + int docOffset; + while ((docOffset = offset.incrementAndGet()) < ops.size()) { + try { + final Engine.Operation op = ops.get(docOffset); + if (op instanceof Engine.Index) { + engine.index((Engine.Index) op); + } else if (op instanceof Engine.Delete){ + engine.delete((Engine.Delete) op); + } else { + engine.noOp((Engine.NoOp) op); + } + if ((docOffset + 1) % 4 == 0) { + engine.refresh("test"); + } + if (rarely()) { + engine.flush(); + } + } catch (IOException e) { + throw new AssertionError(e); + } + } + }); + thread[i].start(); + } + for (int i = 0; i < thread.length; i++) { + thread[i].join(); + } + } + + /** + * Gets all docId from the given engine. + */ + public static Set getDocIds(Engine engine, boolean refresh) throws IOException { + if (refresh) { + engine.refresh("test_get_doc_ids"); + } + try (Engine.Searcher searcher = engine.acquireSearcher("test_get_doc_ids")) { + Set ids = new HashSet<>(); + for (LeafReaderContext leafContext : searcher.reader().leaves()) { + LeafReader reader = leafContext.reader(); + Bits liveDocs = reader.getLiveDocs(); + for (int i = 0; i < reader.maxDoc(); i++) { + if (liveDocs == null || liveDocs.get(i)) { + Document uuid = reader.document(i, Collections.singleton(IdFieldMapper.NAME)); + BytesRef binaryID = uuid.getBinaryValue(IdFieldMapper.NAME); + ids.add(Uid.decodeId(Arrays.copyOfRange(binaryID.bytes, binaryID.offset, binaryID.offset + binaryID.length))); + } + } + } + return ids; + } + } + + /** + * Reads all engine operations that have been processed by the engine from Lucene index. + * The returned operations are sorted and de-duplicated, thus each sequence number will be have at most one operation. + */ + public static List readAllOperationsInLucene(Engine engine, MapperService mapper) throws IOException { + final List operations = new ArrayList<>(); + long maxSeqNo = Math.max(0, ((InternalEngine)engine).getLocalCheckpointTracker().getMaxSeqNo()); + try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", mapper, 0, maxSeqNo, false)) { + Translog.Operation op; + while ((op = snapshot.next()) != null){ + operations.add(op); + } + } + return operations; + } + + /** + * Asserts the provided engine has a consistent document history between translog and Lucene index. 
+ */ + public static void assertConsistentHistoryBetweenTranslogAndLuceneIndex(Engine engine, MapperService mapper) throws IOException { + if (mapper.types().isEmpty() || engine.config().getIndexSettings().isSoftDeleteEnabled() == false) { + return; + } + final long maxSeqNo = ((InternalEngine) engine).getLocalCheckpointTracker().getMaxSeqNo(); + if (maxSeqNo < 0) { + return; // nothing to check + } + final Map translogOps = new HashMap<>(); + try (Translog.Snapshot snapshot = EngineTestCase.getTranslog(engine).newSnapshot()) { + Translog.Operation op; + while ((op = snapshot.next()) != null) { + translogOps.put(op.seqNo(), op); + } + } + final Map luceneOps = readAllOperationsInLucene(engine, mapper).stream() + .collect(Collectors.toMap(Translog.Operation::seqNo, Function.identity())); + final long globalCheckpoint = EngineTestCase.getTranslog(engine).getLastSyncedGlobalCheckpoint(); + final long retainedOps = engine.config().getIndexSettings().getSoftDeleteRetentionOperations(); + final long seqNoForRecovery; + try (Engine.IndexCommitRef safeCommit = engine.acquireSafeIndexCommit()) { + seqNoForRecovery = Long.parseLong(safeCommit.getIndexCommit().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)) + 1; + } + final long minSeqNoToRetain = Math.min(seqNoForRecovery, globalCheckpoint + 1 - retainedOps); + for (Translog.Operation translogOp : translogOps.values()) { + final Translog.Operation luceneOp = luceneOps.get(translogOp.seqNo()); + if (luceneOp == null) { + if (minSeqNoToRetain <= translogOp.seqNo() && translogOp.seqNo() <= maxSeqNo) { + fail("Operation not found seq# [" + translogOp.seqNo() + "], global checkpoint [" + globalCheckpoint + "], " + + "retention policy [" + retainedOps + "], maxSeqNo [" + maxSeqNo + "], translog op [" + translogOp + "]"); + } else { + continue; + } + } + assertThat(luceneOp, notNullValue()); + assertThat(luceneOp.toString(), luceneOp.primaryTerm(), equalTo(translogOp.primaryTerm())); + assertThat(luceneOp.opType(), equalTo(translogOp.opType())); + if (luceneOp.opType() == Translog.Operation.Type.INDEX) { + assertThat(luceneOp.getSource().source, equalTo(translogOp.getSource().source)); + } + } + } + + protected MapperService createMapperService(String type) throws IOException { + IndexMetaData indexMetaData = IndexMetaData.builder("test") + .settings(Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)) + .putMapping(type, "{\"properties\": {}}") + .build(); + MapperService mapperService = MapperTestUtils.newMapperService(new NamedXContentRegistry(ClusterModule.getNamedXWriteables()), + createTempDir(), Settings.EMPTY, "test"); + mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_UPDATE, false); + return mapperService; + } /** * Exposes a translog associated with the given engine for testing purpose. 
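The history-consistency assertion added above reduces to a retention-window check: an operation present in the translog must also be found in Lucene whenever its sequence number lies between the smallest sequence number the soft-deletes policy still has to retain and the maximum sequence number seen by the engine. A minimal standalone sketch of that bound, with plain long inputs standing in for the values the test derives from the safe commit, the translog's global checkpoint, and the index's soft-delete retention setting:

    // Sketch of the bound used by assertConsistentHistoryBetweenTranslogAndLuceneIndex.
    // Variable names are illustrative; the assertion reads these values from the engine itself.
    static boolean mustExistInLucene(long seqNo, long localCheckpointOfSafeCommit,
                                     long globalCheckpoint, long retainedOps, long maxSeqNo) {
        // operations needed to recover from the safe commit must be retained ...
        final long seqNoForRecovery = localCheckpointOfSafeCommit + 1;
        // ... as well as the configured number of operations below the global checkpoint
        final long minSeqNoToRetain = Math.min(seqNoForRecovery, globalCheckpoint + 1 - retainedOps);
        return minSeqNoToRetain <= seqNo && seqNo <= maxSeqNo;
    }

Translog operations outside this window are allowed to be absent from Lucene (they may already have been merged away), which is why the assertion only fails when a hole falls inside the window.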
diff --git a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 7b7c802340946..abeff15b1cbc5 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -60,6 +60,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.InternalEngineFactory; import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction; @@ -99,10 +100,14 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase protected final Index index = new Index("test", "uuid"); private final ShardId shardId = new ShardId(index, 0); - private final Map indexMapping = Collections.singletonMap("type", "{ \"type\": {} }"); + protected final Map indexMapping = Collections.singletonMap("type", "{ \"type\": {} }"); protected ReplicationGroup createGroup(int replicas) throws IOException { - IndexMetaData metaData = buildIndexMetaData(replicas); + return createGroup(replicas, Settings.EMPTY); + } + + protected ReplicationGroup createGroup(int replicas, Settings settings) throws IOException { + IndexMetaData metaData = buildIndexMetaData(replicas, settings, indexMapping); return new ReplicationGroup(metaData); } @@ -111,9 +116,17 @@ protected IndexMetaData buildIndexMetaData(int replicas) throws IOException { } protected IndexMetaData buildIndexMetaData(int replicas, Map mappings) throws IOException { + return buildIndexMetaData(replicas, Settings.EMPTY, mappings); + } + + protected IndexMetaData buildIndexMetaData(int replicas, Settings indexSettings, Map mappings) throws IOException { Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, replicas) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), + randomBoolean() ? 
IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.get(Settings.EMPTY) : between(0, 1000)) + .put(indexSettings) .build(); IndexMetaData.Builder metaData = IndexMetaData.builder(index.getName()) .settings(settings) @@ -146,7 +159,7 @@ protected class ReplicationGroup implements AutoCloseable, Iterable } }); - ReplicationGroup(final IndexMetaData indexMetaData) throws IOException { + protected ReplicationGroup(final IndexMetaData indexMetaData) throws IOException { final ShardRouting primaryRouting = this.createShardRouting("s0", true); primary = newShard(primaryRouting, indexMetaData, null, getEngineFactory(primaryRouting), () -> {}); replicas = new CopyOnWriteArrayList<>(); @@ -448,7 +461,7 @@ private void updateAllocationIDsOnPrimary() throws IOException { } } - abstract class ReplicationAction, + protected abstract class ReplicationAction, ReplicaRequest extends ReplicationRequest, Response extends ReplicationResponse> { private final Request request; @@ -456,7 +469,7 @@ abstract class ReplicationAction, private final ReplicationGroup replicationGroup; private final String opType; - ReplicationAction(Request request, ActionListener listener, ReplicationGroup group, String opType) { + protected ReplicationAction(Request request, ActionListener listener, ReplicationGroup group, String opType) { this.request = request; this.listener = listener; this.replicationGroup = group; @@ -582,11 +595,11 @@ public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, R } } - class PrimaryResult implements ReplicationOperation.PrimaryResult { + protected class PrimaryResult implements ReplicationOperation.PrimaryResult { final ReplicaRequest replicaRequest; final Response finalResponse; - PrimaryResult(ReplicaRequest replicaRequest, Response finalResponse) { + public PrimaryResult(ReplicaRequest replicaRequest, Response finalResponse) { this.replicaRequest = replicaRequest; this.finalResponse = finalResponse; } diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 56c78930244e7..b16dd7df1affb 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -18,13 +18,8 @@ */ package org.elasticsearch.index.shard; -import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexNotFoundException; -import org.apache.lucene.index.LeafReader; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.store.Directory; -import org.apache.lucene.util.Bits; -import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.index.IndexRequest; @@ -58,10 +53,8 @@ import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.engine.InternalEngineFactory; -import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.SourceToParse; -import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.similarity.SimilarityService; @@ -236,6 +229,9 @@ protected IndexShard newShard( Settings indexSettings = 
Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), + randomBoolean() ? IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.get(Settings.EMPTY) : between(0, 1000)) .put(settings) .build(); IndexMetaData.Builder metaData = IndexMetaData.builder(shardRouting.getIndexName()) @@ -376,7 +372,7 @@ protected IndexShard newShard(ShardRouting routing, ShardPath shardPath, IndexMe } /** - * Takes an existing shard, closes it and and starts a new initialing shard at the same location + * Takes an existing shard, closes it and starts a new initialing shard at the same location * * @param listeners new listerns to use for the newly created shard */ @@ -388,7 +384,7 @@ protected IndexShard reinitShard(IndexShard current, IndexingOperationListener.. } /** - * Takes an existing shard, closes it and and starts a new initialing shard at the same location + * Takes an existing shard, closes it and starts a new initialing shard at the same location * * @param routing the shard routing to use for the newly created shard. * @param listeners new listerns to use for the newly created shard @@ -419,8 +415,13 @@ protected IndexShard newStartedShard() throws IOException { * @param primary controls whether the shard will be a primary or a replica. */ protected IndexShard newStartedShard(final boolean primary) throws IOException { - return newStartedShard(primary, Settings.EMPTY, new InternalEngineFactory()); + return newStartedShard(primary, Settings.EMPTY); } + + protected IndexShard newStartedShard(final boolean primary, Settings settings) throws IOException { + return newStartedShard(primary, settings, new InternalEngineFactory()); + } + /** * Creates a new empty shard with the specified settings and engine factory and starts it. * @@ -458,6 +459,7 @@ protected void closeShards(Iterable shards) throws IOException { for (IndexShard shard : shards) { if (shard != null) { try { + assertConsistentHistoryBetweenTranslogAndLucene(shard); shard.close("test", false); } finally { IOUtils.close(shard.store()); @@ -639,22 +641,7 @@ private Store.MetadataSnapshot getMetadataSnapshotOrEmpty(IndexShard replica) th } protected Set getShardDocUIDs(final IndexShard shard) throws IOException { - shard.refresh("get_uids"); - try (Engine.Searcher searcher = shard.acquireSearcher("test")) { - Set ids = new HashSet<>(); - for (LeafReaderContext leafContext : searcher.reader().leaves()) { - LeafReader reader = leafContext.reader(); - Bits liveDocs = reader.getLiveDocs(); - for (int i = 0; i < reader.maxDoc(); i++) { - if (liveDocs == null || liveDocs.get(i)) { - Document uuid = reader.document(i, Collections.singleton(IdFieldMapper.NAME)); - BytesRef binaryID = uuid.getBinaryValue(IdFieldMapper.NAME); - ids.add(Uid.decodeId(Arrays.copyOfRange(binaryID.bytes, binaryID.offset, binaryID.offset + binaryID.length))); - } - } - } - return ids; - } + return EngineTestCase.getDocIds(shard.getEngine(), true); } protected void assertDocCount(IndexShard shard, int docDount) throws IOException { @@ -667,6 +654,12 @@ protected void assertDocs(IndexShard shard, String... 
ids) throws IOException { assertThat(shardDocUIDs, hasSize(ids.length)); } + public static void assertConsistentHistoryBetweenTranslogAndLucene(IndexShard shard) throws IOException { + final Engine engine = shard.getEngineOrNull(); + if (engine != null) { + EngineTestCase.assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, shard.mapperService()); + } + } protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id) throws IOException { return indexDoc(shard, type, id, "{}"); @@ -711,11 +704,14 @@ protected void updateMappings(IndexShard shard, IndexMetaData indexMetadata) { } protected Engine.DeleteResult deleteDoc(IndexShard shard, String type, String id) throws IOException { + final Engine.DeleteResult result; if (shard.routingEntry().primary()) { - return shard.applyDeleteOperationOnPrimary(Versions.MATCH_ANY, type, id, VersionType.INTERNAL); + result = shard.applyDeleteOperationOnPrimary(Versions.MATCH_ANY, type, id, VersionType.INTERNAL); + shard.updateLocalCheckpointForShard(shard.routingEntry().allocationId().getId(), shard.getEngine().getLocalCheckpoint()); } else { - return shard.applyDeleteOperationOnReplica(shard.seqNoStats().getMaxSeqNo() + 1, 0L, type, id, VersionType.EXTERNAL); + result = shard.applyDeleteOperationOnReplica(shard.seqNoStats().getMaxSeqNo() + 1, 0L, type, id, VersionType.EXTERNAL); } + return result; } protected void flushShard(IndexShard shard) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index b985d61260e50..d754224d03a8b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -725,6 +725,10 @@ public Settings indexSettings() { } // always default delayed allocation to 0 to make sure we have tests are not delayed builder.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), 0); + builder.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); + if (randomBoolean()) { + builder.put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), between(0, 1000)); + } return builder.build(); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java index cd1aa6b020d35..c7f2763099cc0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java @@ -42,6 +42,7 @@ import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.node.MockNode; import org.elasticsearch.node.Node; @@ -87,6 +88,14 @@ protected void startNode(long seed) throws Exception { .setOrder(0) .setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)).get(); + client().admin().indices() + .preparePutTemplate("random-soft-deletes-template") + .setPatterns(Collections.singletonList("*")) + .setOrder(0) + .setSettings(Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), + randomBoolean() ? 
IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.get(Settings.EMPTY) : between(0, 1000)) + ).get(); } private static void stopNode() throws IOException { diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 179c6edb03cc7..79e64869ad3a8 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -1164,6 +1164,26 @@ private void assertOpenTranslogReferences() throws Exception { }); } + /** + * Asserts that the document history in Lucene index is consistent with Translog's on every index shard of the cluster. + * This assertion might be expensive, thus we prefer not to execute on every test but only interesting tests. + */ + public void assertConsistentHistoryBetweenTranslogAndLuceneIndex() throws IOException { + final Collection nodesAndClients = nodes.values(); + for (NodeAndClient nodeAndClient : nodesAndClients) { + IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name); + for (IndexService indexService : indexServices) { + for (IndexShard indexShard : indexService) { + try { + IndexShardTestCase.assertConsistentHistoryBetweenTranslogAndLucene(indexShard); + } catch (AlreadyClosedException ignored) { + // shard is closed + } + } + } + } + } + private void randomlyResetClients() throws IOException { // only reset the clients on nightly tests, it causes heavy load... if (RandomizedTest.isNightly() && rarely(random)) { From 11b4fc8f417eee5e32d6e0a3333cce92e44be0ed Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 31 Aug 2018 15:32:22 -0400 Subject: [PATCH 49/52] TEST: Disable randomized soft-deletes settings Tracked at #33321 --- .../java/org/elasticsearch/index/engine/EngineTestCase.java | 3 ++- .../src/main/java/org/elasticsearch/test/ESIntegTestCase.java | 3 ++- .../main/java/org/elasticsearch/test/ESSingleNodeTestCase.java | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 0ace4a4cfa441..64cb9108b4722 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -162,7 +162,8 @@ protected Settings indexSettings() { .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.getKey(), between(10, 10 * IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.get(Settings.EMPTY))) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()) + //norelease - AwaitsFix: https://github.com/elastic/elasticsearch/issues/33321 + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), randomBoolean() ? 
IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.get(Settings.EMPTY) : between(0, 1000)) .build(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index d754224d03a8b..3136503c7f7d1 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -725,7 +725,8 @@ public Settings indexSettings() { } // always default delayed allocation to 0 to make sure we have tests are not delayed builder.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), 0); - builder.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); + //norelease - AwaitsFix: https://github.com/elastic/elasticsearch/issues/33321 + builder.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false); if (randomBoolean()) { builder.put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), between(0, 1000)); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java index c7f2763099cc0..094fee565d7a5 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java @@ -92,7 +92,8 @@ protected void startNode(long seed) throws Exception { .preparePutTemplate("random-soft-deletes-template") .setPatterns(Collections.singletonList("*")) .setOrder(0) - .setSettings(Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()) + //norelease - AwaitsFix: https://github.com/elastic/elasticsearch/issues/33321 + .setSettings(Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), randomBoolean() ? 
IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.get(Settings.EMPTY) : between(0, 1000)) ).get(); From fc7bf5e4afe9a8da33afadae2dd93f302c9aa5ae Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 31 Aug 2018 12:54:12 -0400 Subject: [PATCH 50/52] TEST: Disable soft-deletes in ParentChildTestCase Tracked at #33318 --- .../java/org/elasticsearch/join/query/ParentChildTestCase.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentChildTestCase.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentChildTestCase.java index 2e2cdfb200453..c947e831903d3 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentChildTestCase.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentChildTestCase.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.IndexModule; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.join.ParentJoinPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -59,6 +60,8 @@ protected Collection> transportClientPlugins() { @Override public Settings indexSettings() { Settings.Builder builder = Settings.builder().put(super.indexSettings()) + // AwaitsFix: https://github.com/elastic/elasticsearch/issues/33318 + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) // aggressive filter caching so that we can assert on the filter cache size .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), true) .put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), true); From 3cd9ab4e4290dbb796956e751aac7d1ee868ea06 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Fri, 31 Aug 2018 17:18:22 -0400 Subject: [PATCH 51/52] [Rollup] Fix FullClusterRestart test We need to wait for the job to fully initialize and start before we can attempt to stop it. If we don't, it's possible for the stop API to be called before the persistent task is fully loaded and it'll throw an exception. 
Closes #32773 --- .../org/elasticsearch/xpack/restart/FullClusterRestartIT.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index 5e74a9933bcc1..e240c179d87ec 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -344,7 +344,6 @@ public void testRollupAfterRestart() throws Exception { } } - @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/32773") public void testRollupIDSchemeAfterRestart() throws Exception { assumeTrue("Rollup can be tested with 6.3.0 and onwards", oldClusterVersion.onOrAfter(Version.V_6_3_0)); assumeTrue("Rollup ID scheme changed in 6.4", oldClusterVersion.before(Version.V_6_4_0)); @@ -412,6 +411,8 @@ public void testRollupIDSchemeAfterRestart() throws Exception { indexRequest.setJsonEntity("{\"timestamp\":\"2018-01-02T00:00:01\",\"value\":345}"); client().performRequest(indexRequest); + assertRollUpJob("rollup-id-test"); + // stop the rollup job to force a state save, which will upgrade the ID final Request stopRollupJobRequest = new Request("POST", "_xpack/rollup/job/rollup-id-test/_stop"); Map stopRollupJobResponse = entityAsMap(client().performRequest(stopRollupJobRequest)); From b9c45ff1a5456cef70513de0172b3c02f89d5d40 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 31 Aug 2018 18:48:19 -0400 Subject: [PATCH 52/52] Mute test watcher usage stats output Tracked at #33326 --- .../resources/rest-api-spec/test/watcher/usage/10_basic.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/usage/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/usage/10_basic.yml index a33fcdb529745..7a22ad322bfc2 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/usage/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/usage/10_basic.yml @@ -1,6 +1,8 @@ --- "Test watcher usage stats output": - + - skip: + version: "all" + reason: AwaitsFix at https://github.com/elastic/elasticsearch/issues/33326 - do: catch: missing xpack.watcher.delete_watch:
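The race fixed in the rollup restart test (patch 51) deserves a concrete illustration: the _stop call can only succeed once the job's persistent task has been loaded, so the test has to observe the job before attempting to stop it. A sketch of that wait-then-stop pattern, assuming the client(), entityAsMap() and assertBusy() helpers available in the surrounding REST test case; the GET endpoint and the shape of its JSON response are assumptions for illustration, not the actual assertRollUpJob implementation:

    // Poll until the rollup job's persistent task is visible, then issue the stop request.
    private void waitForRollupJobThenStop(String jobId) throws Exception {
        assertBusy(() -> {
            final Request getJob = new Request("GET", "_xpack/rollup/job/" + jobId);
            final Map<String, Object> body = entityAsMap(client().performRequest(getJob));
            final List<?> jobs = (List<?>) body.get("jobs"); // assumed response field
            assertFalse("job not loaded yet", jobs == null || jobs.isEmpty());
        });
        final Request stopJob = new Request("POST", "_xpack/rollup/job/" + jobId + "/_stop");
        entityAsMap(client().performRequest(stopJob));
    }

Stopping without the initial wait is exactly what produced the intermittent failures tracked in #32773.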