"""
+ new ProcessBuilder(
+ 'buildkite-agent',
+ 'annotate',
+ '--context',
+ 'gradle-build-scans',
+ '--append',
+ '--style',
+ result.failure ? 'error' : 'info',
+ body
+ )
+ .start()
+ .waitFor()
+ }
}
} else {
tag 'LOCAL'
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerResult.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerResult.java
new file mode 100644
index 000000000000..d101c0046f92
--- /dev/null
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerResult.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.gradle.internal.docker;
+
+import java.util.Objects;
+
+/**
+ * This class models the result of running a command. It captures the exit code, standard output and standard error, and allows
+ * applying a String filter to stdout, as this is intended to create configuration-cache compatible output which
+ * aims to be agnostic.
+ */
+public class DockerResult {
+
+ private int exitCode;
+ private String stdout;
+ private String stderr;
+
+ public DockerResult(int exitCode, String stdout, String stderr) {
+ this.exitCode = exitCode;
+ this.stdout = stdout;
+ this.stderr = stderr;
+ }
+
+ public int getExitCode() {
+ return exitCode;
+ }
+
+ public String getStdout() {
+ return stdout;
+ }
+
+ public String getStderr() {
+ return stderr;
+ }
+
+ public void setExitCode(int exitCode) {
+ this.exitCode = exitCode;
+ }
+
+ public void setStdout(String stdout) {
+ this.stdout = stdout;
+ }
+
+ public void setStderr(String stderr) {
+ this.stderr = stderr;
+ }
+
+ public boolean isSuccess() {
+ return exitCode == 0;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ DockerResult that = (DockerResult) o;
+ return exitCode == that.exitCode && Objects.equals(stdout, that.stdout) && Objects.equals(stderr, that.stderr);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(exitCode, stdout, stderr);
+ }
+}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportService.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportService.java
index 2f702b340130..84728d031c40 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportService.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerSupportService.java
@@ -14,12 +14,10 @@
import org.gradle.api.GradleException;
import org.gradle.api.logging.Logger;
import org.gradle.api.logging.Logging;
+import org.gradle.api.provider.ProviderFactory;
import org.gradle.api.services.BuildService;
import org.gradle.api.services.BuildServiceParameters;
-import org.gradle.process.ExecOperations;
-import org.gradle.process.ExecResult;
-import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
@@ -56,12 +54,12 @@ public abstract class DockerSupportService implements BuildService "");
- // If docker all checks out, see if docker-compose is available and working
Optional composePath = getDockerComposePath();
if (lastResult.isSuccess() && composePath.isPresent()) {
isComposeAvailable = runCommand(composePath.get(), "version").isSuccess();
@@ -109,9 +103,12 @@ public DockerAvailability getDockerAvailability() {
// Now let's check if buildx is available and what supported platforms exist
if (lastResult.isSuccess()) {
- Result buildxResult = runCommand(dockerPath, "buildx", "inspect", "--bootstrap");
+ DockerResult buildxResult = runCommand(
+ Arrays.asList(dockerPath, "buildx", "inspect", "--bootstrap"),
+ input -> input.lines().filter(l -> l.startsWith("Platforms:")).collect(Collectors.joining("\n"))
+ );
if (buildxResult.isSuccess()) {
- supportedArchitectures = buildxResult.stdout()
+ supportedArchitectures = buildxResult.getStdout()
.lines()
.filter(l -> l.startsWith("Platforms:"))
.map(l -> l.substring(10))
@@ -127,6 +124,8 @@ public DockerAvailability getDockerAvailability() {
}
}
}
+ } else {
+ dockerPath = null;
}
boolean isAvailable = isVersionHighEnough && lastResult != null && lastResult.isSuccess();
@@ -146,6 +145,17 @@ public DockerAvailability getDockerAvailability() {
return this.dockerAvailability;
}
+ private DockerResult runCommand(List args, DockerValueSource.OutputFilter outputFilter) {
+ return providerFactory.of(DockerValueSource.class, params -> {
+ params.getParameters().getArgs().addAll(args);
+ params.getParameters().getOutputFilter().set(outputFilter);
+ }).get();
+ }
+
+ private DockerResult runCommand(String... args) {
+ return runCommand(Arrays.asList(args), input -> input);
+ }
+
private boolean dockerDaemonIsRunning(String lastResultOutput) {
return lastResultOutput.contains("Cannot connect to the Docker daemon") == false;
}
@@ -198,8 +208,8 @@ void failIfDockerUnavailable(List tasks) {
availability.version == null ? "" : " v" + availability.version,
tasks.size() > 1 ? "s" : "",
String.join("\n", tasks),
- availability.lastCommand.exitCode,
- availability.lastCommand.stderr.trim()
+ availability.lastCommand.getExitCode(),
+ availability.lastCommand.getStderr().trim()
);
throwDockerRequiredException(message);
}
@@ -319,32 +329,6 @@ private void throwDockerRequiredException(final String message, Exception e) {
);
}
- /**
- * Runs a command and captures the exit code, standard output and standard error.
- *
- * @param args the command and any arguments to execute
- * @return a object that captures the result of running the command. If an exception occurring
- * while running the command, or the process was killed after reaching the 10s timeout,
- * then the exit code will be -1.
- */
- private Result runCommand(String... args) {
- if (args.length == 0) {
- throw new IllegalArgumentException("Cannot execute with no command");
- }
-
- ByteArrayOutputStream stdout = new ByteArrayOutputStream();
- ByteArrayOutputStream stderr = new ByteArrayOutputStream();
-
- final ExecResult execResult = execOperations.exec(spec -> {
- // The redundant cast is to silence a compiler warning.
- spec.setCommandLine((Object[]) args);
- spec.setStandardOutput(stdout);
- spec.setErrorOutput(stderr);
- spec.setIgnoreExitValue(true);
- });
- return new Result(execResult.getExitValue(), stdout.toString(), stderr.toString());
- }
-
/**
* An immutable class that represents the results of a Docker search from {@link #getDockerAvailability()}}.
*/
@@ -377,22 +361,12 @@ public record DockerAvailability(
Version version,
// Information about the last command executes while probing Docker, or null.
- Result lastCommand,
+ DockerResult lastCommand,
// Supported build architectures
Set supportedArchitectures
) {}
- /**
- * This class models the result of running a command. It captures the exit code, standard output and standard error.
- */
- private record Result(int exitCode, String stdout, String stderr) {
-
- boolean isSuccess() {
- return exitCode == 0;
- }
- }
-
interface Parameters extends BuildServiceParameters {
File getExclusionsFile();
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerValueSource.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerValueSource.java
new file mode 100644
index 000000000000..d71208b624d7
--- /dev/null
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/DockerValueSource.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.gradle.internal.docker;
+
+import org.gradle.api.provider.ListProperty;
+import org.gradle.api.provider.Property;
+import org.gradle.api.provider.ValueSource;
+import org.gradle.api.provider.ValueSourceParameters;
+import org.gradle.process.ExecOperations;
+import org.gradle.process.ExecResult;
+
+import java.io.ByteArrayOutputStream;
+import java.util.List;
+
+import javax.inject.Inject;
+
+public abstract class DockerValueSource implements ValueSource {
+ public interface OutputFilter {
+ String filter(String input);
+ }
+
+ interface Parameters extends ValueSourceParameters {
+ ListProperty getArgs();
+
+ Property getOutputFilter();
+ }
+
+ @Inject
+ abstract protected ExecOperations getExecOperations();
+
+ @Override
+ public DockerResult obtain() {
+ return runCommand(getParameters().getArgs().get());
+ }
+
+ /**
+ * Runs a command and captures the exit code, standard output and standard error.
+ *
+ * @param args the command and any arguments to execute
+ * @return an object that captures the result of running the command. NOTE(review): this
+ * implementation does not enforce a 10s timeout or map exceptions to exit code -1;
+ * confirm whether that previously documented behavior is still required.
+ */
+ private DockerResult runCommand(List args) {
+ if (args.size() == 0) {
+ throw new IllegalArgumentException("Cannot execute with no command");
+ }
+
+ ByteArrayOutputStream stdout = new ByteArrayOutputStream();
+ ByteArrayOutputStream stderr = new ByteArrayOutputStream();
+
+ final ExecResult execResult = getExecOperations().exec(spec -> {
+ // No cast needed: setCommandLine accepts the argument List directly.
+ spec.setCommandLine(args);
+ spec.setStandardOutput(stdout);
+ spec.setErrorOutput(stderr);
+ spec.setIgnoreExitValue(true);
+ });
+ return new DockerResult(execResult.getExitValue(), filtered(stdout.toString()), stderr.toString());
+ }
+
+ private String filtered(String input) {
+ return getParameters().getOutputFilter().get().filter(input);
+ }
+
+}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java
index e158dd7c755c..194d0361980e 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java
@@ -8,25 +8,568 @@
package org.elasticsearch.gradle.internal.precommit;
-import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis;
+import de.thetaphi.forbiddenapis.Checker;
+import de.thetaphi.forbiddenapis.Constants;
+import de.thetaphi.forbiddenapis.Logger;
+import de.thetaphi.forbiddenapis.ParseException;
+import groovy.lang.Closure;
+import org.gradle.api.DefaultTask;
+import org.gradle.api.GradleException;
+import org.gradle.api.InvalidUserDataException;
+import org.gradle.api.Transformer;
+import org.gradle.api.file.ConfigurableFileCollection;
+import org.gradle.api.file.FileCollection;
import org.gradle.api.file.FileTree;
+import org.gradle.api.file.FileTreeElement;
+import org.gradle.api.file.ProjectLayout;
+import org.gradle.api.file.RegularFileProperty;
+import org.gradle.api.logging.Logging;
+import org.gradle.api.model.ObjectFactory;
+import org.gradle.api.provider.ListProperty;
+import org.gradle.api.provider.Property;
+import org.gradle.api.provider.SetProperty;
+import org.gradle.api.specs.Spec;
+import org.gradle.api.tasks.CacheableTask;
+import org.gradle.api.tasks.CompileClasspath;
import org.gradle.api.tasks.IgnoreEmptyDirectories;
+import org.gradle.api.tasks.Input;
+import org.gradle.api.tasks.InputDirectory;
+import org.gradle.api.tasks.InputFiles;
+import org.gradle.api.tasks.Internal;
+import org.gradle.api.tasks.Optional;
+import org.gradle.api.tasks.OutputFile;
+import org.gradle.api.tasks.PathSensitive;
+import org.gradle.api.tasks.PathSensitivity;
+import org.gradle.api.tasks.SkipWhenEmpty;
+import org.gradle.api.tasks.TaskAction;
+import org.gradle.api.tasks.VerificationTask;
+import org.gradle.api.tasks.util.PatternFilterable;
+import org.gradle.api.tasks.util.PatternSet;
+import org.gradle.workers.WorkAction;
+import org.gradle.workers.WorkParameters;
+import org.gradle.workers.WorkQueue;
+import org.gradle.workers.WorkerExecutor;
+import org.jetbrains.annotations.NotNull;
-/**
- * This implementation is used to fix gradle 8 compatibility of
- * the CheckForbiddenApis task which is built with gradle 4 support
- * in mind.
- * */
-public class CheckForbiddenApisTask extends CheckForbiddenApis {
+import java.io.File;
+import java.io.IOException;
+import java.lang.annotation.RetentionPolicy;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.nio.file.Files;
+import java.nio.file.StandardOpenOption;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Locale;
+import java.util.Objects;
+import java.util.Set;
+
+import javax.inject.Inject;
+
+import static de.thetaphi.forbiddenapis.Checker.Option.DISABLE_CLASSLOADING_CACHE;
+import static de.thetaphi.forbiddenapis.Checker.Option.FAIL_ON_MISSING_CLASSES;
+import static de.thetaphi.forbiddenapis.Checker.Option.FAIL_ON_UNRESOLVABLE_SIGNATURES;
+import static de.thetaphi.forbiddenapis.Checker.Option.FAIL_ON_VIOLATION;
+
+@CacheableTask
+public abstract class CheckForbiddenApisTask extends DefaultTask implements PatternFilterable, VerificationTask, Constants {
+
+ public static final Set BUNDLED_SIGNATURE_DEFAULTS = Set.of("jdk-unsafe", "jdk-non-portable", "jdk-system-out");
+
+ private static final String NL = System.getProperty("line.separator", "\n");
+ private final PatternSet patternSet = new PatternSet().include("**/*.class");
+ private FileCollection classesDirs;
+ private FileCollection classpath;
+ private String targetCompatibility;
+
+ private FileCollection signaturesFiles;
+
+ private final ObjectFactory objectFactory;
+ private ProjectLayout projectLayout;
+
+ private List signatures = new ArrayList<>();
+
+ private File resourcesDir;
+
+ private boolean ignoreFailures = false;
+
+ @Input
+ @Optional
+ abstract SetProperty getBundledSignatures();
+
+ /**
+ * List of a custom Java annotations (full class names) that are used in the checked
+ * code to suppress errors. Those annotations must have at least
+ * {@link RetentionPolicy#CLASS}. They can be applied to classes, their methods,
+ * or fields. By default, {@code @de.thetaphi.forbiddenapis.SuppressForbidden}
+ * can always be used, but needs the {@code forbidden-apis.jar} file in classpath
+ * of compiled project, which may not be wanted.
+ * Instead of a full class name, a glob pattern may be used (e.g.,
+ * {@code **.SuppressForbidden}).
+ */
+ @Input
+ @Optional
+ abstract SetProperty getSuppressAnnotations();
+
+ @Inject
+ public CheckForbiddenApisTask(ObjectFactory factory, ProjectLayout projectLayout) {
+ signaturesFiles = factory.fileCollection();
+ this.objectFactory = factory;
+ this.projectLayout = projectLayout;
+ }
+
+ @OutputFile
+ public File getSuccessMarker() {
+ return new File(projectLayout.getBuildDirectory().getAsFile().get(), "markers/" + this.getName());
+ }
+
+ /**
+ * Directories with the class files to check.
+ * Defaults to current sourceSet's output directory (Gradle 3) or output directories (Gradle 4.0+).
+ */
+ @Internal
+ public FileCollection getClassesDirs() {
+ return classesDirs;
+ }
+
+ /** @see #getClassesDirs() */
+ public void setClassesDirs(FileCollection classesDirs) {
+ Objects.requireNonNull(classesDirs, "classesDirs");
+ this.classesDirs = classesDirs;
+ }
+
+ /** Returns the pattern set to match against class files in {@link #getClassesDirs()}. */
+ @Internal
+ public PatternSet getPatternSet() {
+ return patternSet;
+ }
+
+    /** @see #getPatternSet() — copies the given patterns into this task's pattern set. */
+    public void setPatternSet(PatternSet patternSet) {
+        this.patternSet.copyFrom(patternSet);
+    }
+
+ /**
+ * A {@link FileCollection} used to configure the classpath.
+ * Defaults to current sourceSet's compile classpath.
+ */
+ @CompileClasspath
+ public FileCollection getClasspath() {
+ return classpath;
+ }
+
+ /** @see #getClasspath */
+ public void setClasspath(FileCollection classpath) {
+ Objects.requireNonNull(classpath, "classpath");
+ this.classpath = classpath;
+ }
/**
- * Add additional annotation to make this input gradle 8 compliant.
- * Otherwise we see a deprecation warning here starting with gradle 7.4
- * */
+ * A {@link FileCollection} containing all files, which contain signatures and comments for forbidden API calls.
+ * The signatures are resolved against {@link #getClasspath()}.
+ */
+ @InputFiles
+ @Optional
+ @PathSensitive(PathSensitivity.RELATIVE)
+ public FileCollection getSignaturesFiles() {
+ return signaturesFiles;
+ }
+
+ @InputDirectory
+ @PathSensitive(PathSensitivity.RELATIVE)
+ public File getResourcesDir() {
+ return resourcesDir;
+ }
+
+ public void setResourcesDir(File resourcesDir) {
+ this.resourcesDir = resourcesDir;
+ }
+
+ /** @see #getSignaturesFiles */
+ public void setSignaturesFiles(FileCollection signaturesFiles) {
+ this.signaturesFiles = signaturesFiles;
+ }
+
+ public void modifyBundledSignatures(Transformer, Set> transformer) {
+ getBundledSignatures().set(transformer.transform(getBundledSignatures().get()));
+ }
+
+ public void replaceSignatureFiles(String... signatureFiles) {
+ List resources = new ArrayList<>(signatureFiles.length);
+ for (Object name : signatureFiles) {
+ resources.add(new File(resourcesDir, "forbidden/" + name + ".txt"));
+ }
+ setSignaturesFiles(objectFactory.fileCollection().from(resources));
+ }
+
+ public void addSignatureFiles(String... signatureFiles) {
+ List resources = new ArrayList<>(signatureFiles.length);
+ for (Object name : signatureFiles) {
+ resources.add(new File(resourcesDir, "forbidden/" + name + ".txt"));
+ }
+ setSignaturesFiles(objectFactory.fileCollection().from(getSignaturesFiles()).from(resources));
+
+ }
+
+ /**
+ * Gives multiple API signatures that are joined with newlines and
+ * parsed like a single {@link #getSignaturesFiles()}.
+ * The signatures are resolved against {@link #getClasspath()}.
+ */
+ @Input
+ @Optional
+ public List getSignatures() {
+ return signatures;
+ }
+
+ /** @see #getSignatures */
+ public void setSignatures(List signatures) {
+ this.signatures = signatures;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * This setting is to conform with {@link VerificationTask} interface.
+ * Default is {@code false}.
+ */
@Override
+ @Input
+ public boolean getIgnoreFailures() {
+ return ignoreFailures;
+ }
+
+ @Override
+ public void setIgnoreFailures(boolean ignoreFailures) {
+ this.ignoreFailures = ignoreFailures;
+ }
+
+ /**
+ * The default compiler target version used to expand references to bundled JDK signatures.
+ * E.g., if you use "jdk-deprecated", it will expand to this version.
+ * This setting should be identical to the target version used in the compiler task.
+ * Defaults to {@code project.targetCompatibility}.
+ */
+ @Input
+ @Optional
+ public String getTargetCompatibility() {
+ return targetCompatibility;
+ }
+
+ /** @see #getTargetCompatibility */
+ public void setTargetCompatibility(String targetCompatibility) {
+ this.targetCompatibility = targetCompatibility;
+ }
+
+ // PatternFilterable implementation:
+
+ /**
+ * {@inheritDoc}
+ *
+ * Set of patterns matching all class files to be parsed from the classesDirectory.
+ * Can be changed to e.g. exclude several files (using excludes).
+ * The default is a single include with pattern '**/*.class'
+ */
+ @Override
+ @Internal
+ public Set getIncludes() {
+ return getPatternSet().getIncludes();
+ }
+
+ @Override
+ public CheckForbiddenApisTask setIncludes(Iterable includes) {
+ getPatternSet().setIncludes(includes);
+ return this;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * Set of patterns matching class files to be excluded from checking.
+ */
+ @Override
+ @Internal
+ public Set getExcludes() {
+ return getPatternSet().getExcludes();
+ }
+
+ @Override
+ public CheckForbiddenApisTask setExcludes(Iterable excludes) {
+ getPatternSet().setExcludes(excludes);
+ return this;
+ }
+
+ @Override
+ public CheckForbiddenApisTask exclude(String... arg0) {
+ getPatternSet().exclude(arg0);
+ return this;
+ }
+
+ @Override
+ public CheckForbiddenApisTask exclude(Iterable arg0) {
+ getPatternSet().exclude(arg0);
+ return this;
+ }
+
+ @Override
+ public CheckForbiddenApisTask exclude(Spec arg0) {
+ getPatternSet().exclude(arg0);
+ return this;
+ }
+
+ @Override
+ public CheckForbiddenApisTask exclude(@SuppressWarnings("rawtypes") Closure arg0) {
+ getPatternSet().exclude(arg0);
+ return this;
+ }
+
+ @Override
+ public CheckForbiddenApisTask include(String... arg0) {
+ getPatternSet().include(arg0);
+ return this;
+ }
+
+ @Override
+ public CheckForbiddenApisTask include(Iterable arg0) {
+ getPatternSet().include(arg0);
+ return this;
+ }
+
+ @Override
+ public CheckForbiddenApisTask include(Spec arg0) {
+ getPatternSet().include(arg0);
+ return this;
+ }
+
+ @Override
+ public CheckForbiddenApisTask include(@SuppressWarnings("rawtypes") Closure arg0) {
+ getPatternSet().include(arg0);
+ return this;
+ }
+
+ /** Returns the classes to check. */
+ @InputFiles
+ @SkipWhenEmpty
@IgnoreEmptyDirectories
+ @PathSensitive(PathSensitivity.RELATIVE)
public FileTree getClassFiles() {
- return super.getClassFiles();
+ return getClassesDirs().getAsFileTree().matching(getPatternSet());
+ }
+
+ @Inject
+ public abstract WorkerExecutor getWorkerExecutor();
+
+ /** Executes the forbidden apis task. */
+ @TaskAction
+ public void checkForbidden() {
+ WorkQueue workQueue = getWorkerExecutor().noIsolation();
+ workQueue.submit(ForbiddenApisCheckWorkAction.class, parameters -> {
+ parameters.getClasspath().setFrom(getClasspath());
+ parameters.getClassDirectories().setFrom(getClassesDirs());
+ parameters.getClassFiles().from(getClassFiles().getFiles());
+ parameters.getSuppressAnnotations().set(getSuppressAnnotations());
+ parameters.getBundledSignatures().set(getBundledSignatures());
+ parameters.getSignatures().set(getSignatures());
+ parameters.getTargetCompatibility().set(getTargetCompatibility());
+ parameters.getIgnoreFailures().set(getIgnoreFailures());
+ parameters.getSuccessMarker().set(getSuccessMarker());
+ });
+ }
+
+ abstract static class ForbiddenApisCheckWorkAction implements WorkAction {
+
+ private final org.gradle.api.logging.Logger logger = Logging.getLogger(getClass());
+
+ @Inject
+ public ForbiddenApisCheckWorkAction() {}
+
+ private boolean checkIsUnsupportedJDK(Checker checker) {
+ if (checker.isSupportedJDK == false) {
+ final String msg = String.format(
+ Locale.ENGLISH,
+ "Your Java runtime (%s %s) is not supported by the forbiddenapis plugin. Please run the checks with a supported JDK!",
+ System.getProperty("java.runtime.name"),
+ System.getProperty("java.runtime.version")
+ );
+ logger.warn(msg);
+ return true;
+ }
+ return false;
+ }
+
+ @Override
+ public void execute() {
+
+ final URLClassLoader urlLoader = createClassLoader(getParameters().getClasspath(), getParameters().getClassDirectories());
+ try {
+ final Checker checker = createChecker(urlLoader);
+ if (checkIsUnsupportedJDK(checker)) {
+ return;
+ }
+
+ final Set suppressAnnotations = getParameters().getSuppressAnnotations().get();
+ for (String a : suppressAnnotations) {
+ checker.addSuppressAnnotation(a);
+ }
+
+ try {
+ final Set bundledSignatures = getParameters().getBundledSignatures().get();
+ if (bundledSignatures.isEmpty() == false) {
+ final String bundledSigsJavaVersion = getParameters().getTargetCompatibility().get();
+ if (bundledSigsJavaVersion == null) {
+ logger.warn(
+ "The 'targetCompatibility' project or task property is missing. "
+ + "Trying to read bundled JDK signatures without compiler target. "
+ + "You have to explicitly specify the version in the resource name."
+ );
+ }
+ for (String bs : bundledSignatures) {
+ checker.addBundledSignatures(bs, bundledSigsJavaVersion);
+ }
+ }
+
+ final FileCollection signaturesFiles = getParameters().getSignaturesFiles();
+ if (signaturesFiles != null) for (final File f : signaturesFiles) {
+ checker.parseSignaturesFile(f);
+ }
+ final List signatures = getParameters().getSignatures().get();
+ if ((signatures != null) && !signatures.isEmpty()) {
+ final StringBuilder sb = new StringBuilder();
+ for (String line : signatures) {
+ sb.append(line).append(NL);
+ }
+ checker.parseSignaturesString(sb.toString());
+ }
+ } catch (IOException ioe) {
+ throw new GradleException("IO problem while reading files with API signatures.", ioe);
+ } catch (ParseException pe) {
+ throw new InvalidUserDataException("Parsing signatures failed: " + pe.getMessage(), pe);
+ }
+
+ if (checker.hasNoSignatures()) {
+ if (checker.noSignaturesFilesParsed()) {
+ throw new InvalidUserDataException(
+ "No signatures were added to task; use properties 'signatures', 'bundledSignatures', 'signaturesURLs', and/or 'signaturesFiles' to define those!"
+ );
+ } else {
+ logger.info("Skipping execution because no API signatures are available.");
+ return;
+ }
+ }
+
+ try {
+ checker.addClassesToCheck(getParameters().getClassFiles());
+ } catch (IOException ioe) {
+ throw new GradleException("Failed to load one of the given class files.", ioe);
+ }
+ checker.run();
+ writeMarker(getParameters().getSuccessMarker().getAsFile().get());
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ } finally {
+ // Close the classloader to free resources:
+ try {
+ if (urlLoader != null) urlLoader.close();
+ } catch (IOException ioe) {
+ // getLogger().warn("Cannot close classloader: ".concat(ioe.toString()));
+ }
+ }
+ }
+
+ private void writeMarker(File successMarker) throws IOException {
+ Files.write(successMarker.toPath(), new byte[] {}, StandardOpenOption.CREATE);
+ }
+
+ private URLClassLoader createClassLoader(FileCollection classpath, FileCollection classesDirs) {
+ if (classesDirs == null || classpath == null) {
+ throw new InvalidUserDataException("Missing 'classesDirs' or 'classpath' property.");
+ }
+
+ final Set cpElements = new LinkedHashSet<>();
+ cpElements.addAll(classpath.getFiles());
+ cpElements.addAll(classesDirs.getFiles());
+ final URL[] urls = new URL[cpElements.size()];
+ try {
+ int i = 0;
+ for (final File cpElement : cpElements) {
+ urls[i++] = cpElement.toURI().toURL();
+ }
+ assert i == urls.length;
+ } catch (MalformedURLException mfue) {
+ throw new InvalidUserDataException("Failed to build classpath URLs.", mfue);
+ }
+
+ return URLClassLoader.newInstance(urls, ClassLoader.getSystemClassLoader());
+ }
+
+ @NotNull
+ private Checker createChecker(URLClassLoader urlLoader) {
+ final EnumSet options = EnumSet.noneOf(Checker.Option.class);
+ options.add(FAIL_ON_MISSING_CLASSES);
+ if (getParameters().getIgnoreFailures().get() == false) {
+ options.add(FAIL_ON_VIOLATION);
+ }
+ options.add(FAIL_ON_UNRESOLVABLE_SIGNATURES);
+ options.add(DISABLE_CLASSLOADING_CACHE);
+ final Checker checker = new Checker(new GradleForbiddenApiLogger(logger), urlLoader, options);
+ return checker;
+ }
+
+ private static class GradleForbiddenApiLogger implements Logger {
+
+ private final org.gradle.api.logging.Logger delegate;
+
+ GradleForbiddenApiLogger(org.gradle.api.logging.Logger delegate) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ public void error(String msg) {
+ delegate.error(msg);
+ }
+
+ @Override
+ public void warn(String msg) {
+ delegate.warn(msg);
+ }
+
+ @Override
+ public void info(String msg) {
+ delegate.info(msg);
+ }
+
+ @Override
+ public void debug(String msg) {
+ delegate.debug(msg);
+ }
+ };
}
+
+ interface Parameters extends WorkParameters {
+ ConfigurableFileCollection getClassDirectories();
+
+ ConfigurableFileCollection getClassFiles();
+
+ ConfigurableFileCollection getClasspath();
+
+ SetProperty getSuppressAnnotations();
+
+ RegularFileProperty getSuccessMarker();
+
+ ConfigurableFileCollection getSignaturesFiles();
+
+ SetProperty getBundledSignatures();
+
+ Property getTargetCompatibility();
+
+ Property getIgnoreFailures();
+
+ ListProperty getSignatures();
+
+ }
+
}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTask.java
index 71de2626d5fc..092230a2b12e 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTask.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/DependencyLicensesTask.java
@@ -88,8 +88,6 @@ public class DependencyLicensesTask extends DefaultTask {
private final Logger logger = Logging.getLogger(getClass());
- private static final String SHA_EXTENSION = ".sha1";
-
// TODO: we should be able to default this to eg compile deps, but we need to move the licenses
// check from distribution to core (ie this should only be run on java projects)
/**
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ForbiddenApisPrecommitPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ForbiddenApisPrecommitPlugin.java
index 96fb11214902..e24dd5ab2094 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ForbiddenApisPrecommitPlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ForbiddenApisPrecommitPlugin.java
@@ -8,50 +8,37 @@
package org.elasticsearch.gradle.internal.precommit;
-import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApisExtension;
-import groovy.lang.Closure;
-
import org.elasticsearch.gradle.internal.ExportElasticsearchBuildResourcesTask;
import org.elasticsearch.gradle.internal.conventions.precommit.PrecommitPlugin;
import org.elasticsearch.gradle.internal.info.BuildParams;
import org.gradle.api.Project;
import org.gradle.api.Task;
-import org.gradle.api.plugins.ExtraPropertiesExtension;
import org.gradle.api.plugins.JavaBasePlugin;
-import org.gradle.api.plugins.JavaPluginExtension;
import org.gradle.api.specs.Specs;
import org.gradle.api.tasks.SourceSetContainer;
import org.gradle.api.tasks.TaskProvider;
-import java.nio.file.Path;
-import java.util.ArrayList;
-import java.util.List;
+import java.io.File;
import java.util.Set;
-import static de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin.FORBIDDEN_APIS_EXTENSION_NAME;
import static de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin.FORBIDDEN_APIS_TASK_NAME;
+import static org.elasticsearch.gradle.internal.precommit.CheckForbiddenApisTask.BUNDLED_SIGNATURE_DEFAULTS;
public class ForbiddenApisPrecommitPlugin extends PrecommitPlugin {
+
@Override
public TaskProvider extends Task> createTask(Project project) {
project.getPluginManager().apply(JavaBasePlugin.class);
- // create Extension for defaults:
- var checkForbiddenApisExtension = project.getExtensions()
- .create(FORBIDDEN_APIS_EXTENSION_NAME, CheckForbiddenApisExtension.class, project);
-
// Create a convenience task for all checks (this does not conflict with extension, as it has higher priority in DSL):
var forbiddenTask = project.getTasks()
.register(FORBIDDEN_APIS_TASK_NAME, task -> { task.setDescription("Runs forbidden-apis checks."); });
- JavaPluginExtension javaPluginExtension = project.getExtensions().getByType(JavaPluginExtension.class);
- // Define our tasks (one for each SourceSet):
-
TaskProvider resourcesTask = project.getTasks()
.register("forbiddenApisResources", ExportElasticsearchBuildResourcesTask.class);
- Path resourcesDir = project.getBuildDir().toPath().resolve("forbidden-apis-config");
+ File resourcesDir = project.getLayout().getBuildDirectory().dir("forbidden-apis-config").get().getAsFile();
resourcesTask.configure(t -> {
- t.setOutputDir(resourcesDir.toFile());
+ t.setOutputDir(resourcesDir);
t.copy("forbidden/jdk-signatures.txt");
t.copy("forbidden/jdk-deprecated.txt");
t.copy("forbidden/es-all-signatures.txt");
@@ -65,60 +52,36 @@ public TaskProvider<? extends Task> createTask(Project project) {
String sourceSetTaskName = sourceSet.getTaskName(FORBIDDEN_APIS_TASK_NAME, null);
var sourceSetTask = project.getTasks().register(sourceSetTaskName, CheckForbiddenApisTask.class, t -> {
t.setDescription("Runs forbidden-apis checks on '${sourceSet.name}' classes.");
+ t.setResourcesDir(resourcesDir);
t.getOutputs().upToDateWhen(Specs.SATISFIES_ALL);
t.setClassesDirs(sourceSet.getOutput().getClassesDirs());
t.dependsOn(resourcesTask);
- t.setClasspath(sourceSet.getRuntimeClasspath().plus(sourceSet.getCompileClasspath()).plus(sourceSet.getOutput()));
+ t.setClasspath(sourceSet.getRuntimeClasspath().plus(sourceSet.getCompileClasspath()));
t.setTargetCompatibility(BuildParams.getMinimumRuntimeVersion().getMajorVersion());
- t.setBundledSignatures(Set.of("jdk-unsafe", "jdk-non-portable", "jdk-system-out"));
+ t.getBundledSignatures().set(BUNDLED_SIGNATURE_DEFAULTS);
t.setSignaturesFiles(
project.files(
- resourcesDir.resolve("forbidden/jdk-signatures.txt"),
- resourcesDir.resolve("forbidden/es-all-signatures.txt"),
- resourcesDir.resolve("forbidden/jdk-deprecated.txt")
+ resourcesDir.toPath().resolve("forbidden/jdk-signatures.txt"),
+ resourcesDir.toPath().resolve("forbidden/es-all-signatures.txt"),
+ resourcesDir.toPath().resolve("forbidden/jdk-deprecated.txt")
)
);
- t.setSuppressAnnotations(Set.of("**.SuppressForbidden"));
+ t.getSuppressAnnotations().set(Set.of("**.SuppressForbidden"));
if (t.getName().endsWith("Test")) {
t.setSignaturesFiles(
t.getSignaturesFiles()
.plus(
project.files(
- resourcesDir.resolve("forbidden/es-test-signatures.txt"),
- resourcesDir.resolve("forbidden/http-signatures.txt")
+ resourcesDir.toPath().resolve("forbidden/es-test-signatures.txt"),
+ resourcesDir.toPath().resolve("forbidden/http-signatures.txt")
)
)
);
} else {
t.setSignaturesFiles(
- t.getSignaturesFiles().plus(project.files(resourcesDir.resolve("forbidden/es-server-signatures.txt")))
+ t.getSignaturesFiles().plus(project.files(resourcesDir.toPath().resolve("forbidden/es-server-signatures.txt")))
);
}
- ExtraPropertiesExtension ext = t.getExtensions().getExtraProperties();
- ext.set("replaceSignatureFiles", new Closure(t) {
- @Override
- public Void call(Object... names) {
- List resources = new ArrayList<>(names.length);
- for (Object name : names) {
- resources.add(resourcesDir.resolve("forbidden/" + name + ".txt"));
- }
- t.setSignaturesFiles(project.files(resources));
- return null;
- }
-
- });
- ext.set("addSignatureFiles", new Closure(t) {
- @Override
- public Void call(Object... names) {
- List resources = new ArrayList<>(names.length);
- for (Object name : names) {
- resources.add(resourcesDir.resolve("forbidden/" + name + ".txt"));
- }
- t.setSignaturesFiles(t.getSignaturesFiles().plus(project.files(resources)));
- return null;
- }
- });
-
});
forbiddenTask.configure(t -> t.dependsOn(sourceSetTask));
});
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/LoggerUsageTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/LoggerUsageTask.java
index 0059913ad086..559d7536c310 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/LoggerUsageTask.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/LoggerUsageTask.java
@@ -52,7 +52,7 @@ public LoggerUsageTask(ObjectFactory objectFactory) {
}
@Inject
- abstract public WorkerExecutor getWorkerExecutor();
+ public abstract WorkerExecutor getWorkerExecutor();
@TaskAction
public void runLoggerUsageTask() {
diff --git a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt
index d19500c3c332..34f39bbc4ca5 100644
--- a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt
+++ b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt
@@ -160,3 +160,7 @@ org.elasticsearch.cluster.ClusterState#compatibilityVersions()
org.elasticsearch.cluster.ClusterFeatures#nodeFeatures()
@defaultMessage ClusterFeatures#clusterHasFeature is for internal use only. Use FeatureService#clusterHasFeature to determine if a feature is present on the cluster.
org.elasticsearch.cluster.ClusterFeatures#clusterHasFeature(org.elasticsearch.features.NodeFeature)
+
+@defaultMessage Use a Thread constructor with a name, anonymous threads are more difficult to debug
+java.lang.Thread#<init>(java.lang.Runnable)
+java.lang.Thread#<init>(java.lang.ThreadGroup, java.lang.Runnable)
diff --git a/build-tools-internal/src/main/resources/forbidden/jdk-deprecated.txt b/build-tools-internal/src/main/resources/forbidden/jdk-deprecated.txt
index 7a0c8f968507..d7545a5a5fac 100644
--- a/build-tools-internal/src/main/resources/forbidden/jdk-deprecated.txt
+++ b/build-tools-internal/src/main/resources/forbidden/jdk-deprecated.txt
@@ -210,7 +210,6 @@ java.lang.Thread#countStackFrames()
java.lang.Thread#resume()
java.lang.Thread#stop()
java.lang.Thread#suspend()
-java.lang.ThreadGroup#allowThreadSuspension(boolean)
java.lang.ThreadGroup#checkAccess()
java.lang.ThreadGroup#destroy()
java.lang.ThreadGroup#isDaemon()
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
index bfc1b1e6be96..b0998957910a 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
@@ -232,15 +232,7 @@ public class RestHighLevelClient implements Closeable {
* {@link RestClient} to be used to perform requests.
*/
public RestHighLevelClient(RestClientBuilder restClientBuilder) {
- this(restClientBuilder, Collections.emptyList());
- }
-
- /**
- * Creates a {@link RestHighLevelClient} given the low level {@link RestClientBuilder} that allows to build the
- * {@link RestClient} to be used to perform requests and parsers for custom response sections added to Elasticsearch through plugins.
- */
- protected RestHighLevelClient(RestClientBuilder restClientBuilder, List namedXContentEntries) {
- this(restClientBuilder.build(), RestClient::close, namedXContentEntries);
+ this(restClientBuilder.build(), RestClient::close, Collections.emptyList());
}
/**
@@ -265,7 +257,7 @@ protected RestHighLevelClient(
* The consumer argument allows to control what needs to be done when the {@link #close()} method is called.
* Also subclasses can provide parsers for custom response sections added to Elasticsearch through plugins.
*/
- protected RestHighLevelClient(
+ private RestHighLevelClient(
RestClient restClient,
CheckedConsumer doClose,
List namedXContentEntries,
@@ -309,17 +301,6 @@ public final void close() throws IOException {
doClose.accept(client);
}
- /**
- * Executes a bulk request using the Bulk API.
- * See Bulk API on elastic.co
- * @param bulkRequest the request
- * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
- * @return the response
- */
- public final BulkResponse bulk(BulkRequest bulkRequest, RequestOptions options) throws IOException {
- return performRequestAndParseEntity(bulkRequest, RequestConverters::bulk, options, BulkResponse::fromXContent, emptySet());
- }
-
/**
* Asynchronously executes a bulk request using the Bulk API.
* See Bulk API on elastic.co
@@ -410,7 +391,7 @@ public final SearchResponse scroll(SearchScrollRequest searchScrollRequest, Requ
* layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}.
*/
@Deprecated
- protected final Resp performRequestAndParseEntity(
+ private Resp performRequestAndParseEntity(
Req request,
CheckedFunction requestConverter,
RequestOptions options,
@@ -425,7 +406,7 @@ protected final Resp performRequestAndParseEnt
* layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}.
*/
@Deprecated
- protected final Resp performRequest(
+ private Resp performRequest(
Req request,
CheckedFunction requestConverter,
RequestOptions options,
@@ -439,23 +420,6 @@ protected final Resp performRequest(
return internalPerformRequest(request, requestConverter, options, responseConverter, ignores);
}
- /**
- * Defines a helper method for performing a request.
- */
- protected final Resp performRequest(
- Req request,
- CheckedFunction requestConverter,
- RequestOptions options,
- CheckedFunction responseConverter,
- Set ignores
- ) throws IOException {
- Optional validationException = request.validate();
- if (validationException != null && validationException.isPresent()) {
- throw validationException.get();
- }
- return internalPerformRequest(request, requestConverter, options, responseConverter, ignores);
- }
-
/**
* Provides common functionality for performing a request.
*/
@@ -499,7 +463,7 @@ private Resp internalPerformRequest(
* @return Cancellable instance that may be used to cancel the request
*/
@Deprecated
- protected final Cancellable performRequestAsyncAndParseEntity(
+ private Cancellable performRequestAsyncAndParseEntity(
Req request,
CheckedFunction requestConverter,
RequestOptions options,
@@ -523,7 +487,7 @@ protected final Cancellable performRequestAsyn
* @return Cancellable instance that may be used to cancel the request
*/
@Deprecated
- protected final Cancellable performRequestAsync(
+ private Cancellable performRequestAsync(
Req request,
CheckedFunction requestConverter,
RequestOptions options,
@@ -564,7 +528,7 @@ private Cancellable internalPerformRequestAsync(
return performClientRequestAsync(req, responseListener);
}
- final ResponseListener wrapResponseListener(
+ private ResponseListener wrapResponseListener(
CheckedFunction responseConverter,
ActionListener actionListener,
Set ignores
@@ -611,7 +575,7 @@ public void onFailure(Exception exception) {
* that wraps the original {@link ResponseException}. The potential exception obtained while parsing is added to the returned
* exception as a suppressed exception. This method is guaranteed to not throw any exception eventually thrown while parsing.
*/
- protected final ElasticsearchStatusException parseResponseException(ResponseException responseException) {
+ private ElasticsearchStatusException parseResponseException(ResponseException responseException) {
Response response = responseException.getResponse();
HttpEntity entity = response.getEntity();
ElasticsearchStatusException elasticsearchException;
@@ -631,7 +595,7 @@ protected final ElasticsearchStatusException parseResponseException(ResponseExce
return elasticsearchException;
}
- protected final Resp parseEntity(final HttpEntity entity, final CheckedFunction entityParser)
+ private Resp parseEntity(final HttpEntity entity, final CheckedFunction entityParser)
throws IOException {
if (entity == null) {
throw new IllegalStateException("Response body expected but not returned");
@@ -735,7 +699,7 @@ private Cancellable performClientRequestAsync(Request request, ResponseListener
 ListenableFuture<Optional<ValidationException>> versionCheck = getVersionValidationFuture();
// Create a future that tracks cancellation of this method's result and forwards cancellation to the actual LLRC request.
- CompletableFuture cancellationForwarder = new CompletableFuture();
+ CompletableFuture cancellationForwarder = new CompletableFuture<>();
Cancellable result = new Cancellable() {
@Override
public void cancel() {
@@ -754,7 +718,7 @@ void runIfNotCancelled(Runnable runnable) {
// Send the request after we have done the version compatibility check. Note that if it has already happened, the listener will
// be called immediately on the same thread with no asynchronous scheduling overhead.
- versionCheck.addListener(new ActionListener<Optional<ValidationException>>() {
+ versionCheck.addListener(new ActionListener<>() {
@Override
public void onResponse(Optional validation) {
if (validation.isPresent() == false) {
@@ -779,13 +743,13 @@ public void onFailure(Exception e) {
});
return result;
- };
+ }
/**
* Go through all the request's existing headers, looking for {@code headerName} headers and if they exist,
* changing them to use version compatibility. If no request headers are changed, modify the entity type header if appropriate
*/
- boolean addCompatibilityFor(RequestOptions.Builder newOptions, Header entityHeader, String headerName) {
+ private boolean addCompatibilityFor(RequestOptions.Builder newOptions, Header entityHeader, String headerName) {
// Modify any existing "Content-Type" headers on the request to use the version compatibility, if available
boolean contentTypeModified = false;
for (Header header : new ArrayList<>(newOptions.getHeaders())) {
@@ -807,7 +771,7 @@ boolean addCompatibilityFor(RequestOptions.Builder newOptions, Header entityHead
* Modify the given header to be version compatible, if necessary.
* Returns true if a modification was made, false otherwise.
*/
- boolean modifyHeader(RequestOptions.Builder newOptions, Header header, String headerName) {
+ private boolean modifyHeader(RequestOptions.Builder newOptions, Header header, String headerName) {
for (EntityType type : EntityType.values()) {
final String headerValue = header.getValue();
if (headerValue.startsWith(type.header())) {
@@ -825,7 +789,7 @@ boolean modifyHeader(RequestOptions.Builder newOptions, Header header, String he
* modifying the "Content-Type" and "Accept" headers if present, or modifying the header based
* on the request's entity type.
*/
- void modifyRequestForCompatibility(Request request) {
+ private void modifyRequestForCompatibility(Request request) {
final Header entityHeader = request.getEntity() == null ? null : request.getEntity().getContentType();
final RequestOptions.Builder newOptions = request.getOptions().toBuilder();
@@ -982,7 +946,7 @@ private Optional getVersionValidation(Response response) throws IOExcept
return Optional.empty();
}
- static List getDefaultNamedXContents() {
+ private static List getDefaultNamedXContents() {
Map> map = new HashMap<>();
map.put(CardinalityAggregationBuilder.NAME, (p, c) -> ParsedCardinality.fromXContent(p, (String) c));
map.put(InternalHDRPercentiles.NAME, (p, c) -> ParsedHDRPercentiles.fromXContent(p, (String) c));
@@ -1068,7 +1032,7 @@ static List getDefaultNamedXContents() {
/**
* Loads and returns the {@link NamedXContentRegistry.Entry} parsers provided by plugins.
*/
- static List getProvidedNamedXContents() {
+ private static List getProvidedNamedXContents() {
List entries = new ArrayList<>();
for (NamedXContentProvider service : ServiceLoader.load(NamedXContentProvider.class)) {
entries.addAll(service.getNamedXContentParsers());
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/AcknowledgedResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/AcknowledgedResponse.java
deleted file mode 100644
index 7adcee74cb20..000000000000
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/AcknowledgedResponse.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0 and the Server Side Public License, v 1; you may not use this file except
- * in compliance with, at your election, the Elastic License 2.0 or the Server
- * Side Public License, v 1.
- */
-
-package org.elasticsearch.client.core;
-
-import org.elasticsearch.xcontent.ConstructingObjectParser;
-import org.elasticsearch.xcontent.ParseField;
-import org.elasticsearch.xcontent.XContentParser;
-
-import java.io.IOException;
-import java.util.Objects;
-import java.util.function.Function;
-
-import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
-
-public class AcknowledgedResponse {
-
- protected static final String PARSE_FIELD_NAME = "acknowledged";
- private static final ConstructingObjectParser PARSER = AcknowledgedResponse.generateParser(
- "acknowledged_response",
- AcknowledgedResponse::new,
- AcknowledgedResponse.PARSE_FIELD_NAME
- );
-
- private final boolean acknowledged;
-
- public AcknowledgedResponse(final boolean acknowledged) {
- this.acknowledged = acknowledged;
- }
-
- public boolean isAcknowledged() {
- return acknowledged;
- }
-
- protected static ConstructingObjectParser generateParser(String name, Function ctor, String parseField) {
- ConstructingObjectParser p = new ConstructingObjectParser<>(name, true, args -> ctor.apply((boolean) args[0]));
- p.declareBoolean(constructorArg(), new ParseField(parseField));
- return p;
- }
-
- public static AcknowledgedResponse fromXContent(final XContentParser parser) throws IOException {
- return PARSER.parse(parser, null);
- }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) {
- return true;
- }
- if (o == null || getClass() != o.getClass()) {
- return false;
- }
- final AcknowledgedResponse that = (AcknowledgedResponse) o;
- return isAcknowledged() == that.isAcknowledged();
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(acknowledged);
- }
-
- /**
- * @return the field name this response uses to output the acknowledged flag
- */
- protected String getFieldName() {
- return PARSE_FIELD_NAME;
- }
-}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/ShardsAcknowledgedResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/ShardsAcknowledgedResponse.java
deleted file mode 100644
index a80a6bb2a15b..000000000000
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/ShardsAcknowledgedResponse.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0 and the Server Side Public License, v 1; you may not use this file except
- * in compliance with, at your election, the Elastic License 2.0 or the Server
- * Side Public License, v 1.
- */
-package org.elasticsearch.client.core;
-
-import org.elasticsearch.xcontent.ConstructingObjectParser;
-import org.elasticsearch.xcontent.ParseField;
-import org.elasticsearch.xcontent.XContentParser;
-
-import java.io.IOException;
-
-import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
-
-public class ShardsAcknowledgedResponse extends AcknowledgedResponse {
-
- protected static final String SHARDS_PARSE_FIELD_NAME = "shards_acknowledged";
-
- private static ConstructingObjectParser buildParser() {
-
- ConstructingObjectParser p = new ConstructingObjectParser<>(
- "freeze",
- true,
- args -> new ShardsAcknowledgedResponse((boolean) args[0], (boolean) args[1])
- );
- p.declareBoolean(constructorArg(), new ParseField(AcknowledgedResponse.PARSE_FIELD_NAME));
- p.declareBoolean(constructorArg(), new ParseField(SHARDS_PARSE_FIELD_NAME));
- return p;
- }
-
- private static final ConstructingObjectParser PARSER = buildParser();
-
- private final boolean shardsAcknowledged;
-
- public ShardsAcknowledgedResponse(boolean acknowledged, boolean shardsAcknowledged) {
- super(acknowledged);
- this.shardsAcknowledged = shardsAcknowledged;
- }
-
- public boolean isShardsAcknowledged() {
- return shardsAcknowledged;
- }
-
- public static ShardsAcknowledgedResponse fromXContent(XContentParser parser) throws IOException {
- return PARSER.parse(parser, null);
- }
-}
diff --git a/client/rest/build.gradle b/client/rest/build.gradle
index 85d38b007e63..6006fae1c2d8 100644
--- a/client/rest/build.gradle
+++ b/client/rest/build.gradle
@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
-import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis
+import org.elasticsearch.gradle.internal.precommit.CheckForbiddenApisTask
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.internal.conventions.precommit.LicenseHeadersTask
@@ -60,7 +60,7 @@ tasks.named("processResources").configure {
]
}
-tasks.withType(CheckForbiddenApis).configureEach {
+tasks.withType(CheckForbiddenApisTask).configureEach {
//client does not depend on server, so only jdk and http signatures should be checked
replaceSignatureFiles('jdk-signatures', 'http-signatures')
}
@@ -71,8 +71,11 @@ tasks.named("forbiddenPatterns").configure {
tasks.named('forbiddenApisTest').configure {
//we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage
- bundledSignatures -= 'jdk-non-portable'
- bundledSignatures += 'jdk-internal'
+ modifyBundledSignatures { signatures ->
+ signatures -= 'jdk-non-portable'
+ signatures += 'jdk-internal'
+ signatures
+ }
}
// JarHell is part of es server, which we don't want to pull in
diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle
index 546e81445bb8..901917c7b25f 100644
--- a/client/sniffer/build.gradle
+++ b/client/sniffer/build.gradle
@@ -57,8 +57,12 @@ tasks.named('forbiddenApisMain').configure {
tasks.named('forbiddenApisTest').configure {
//we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage
- bundledSignatures -= 'jdk-non-portable'
- bundledSignatures += 'jdk-internal'
+ modifyBundledSignatures { bundledSignatures ->
+ bundledSignatures -= 'jdk-non-portable'
+ bundledSignatures += 'jdk-internal'
+ bundledSignatures
+ }
+
//client does not depend on server, so only jdk signatures should be checked
replaceSignatureFiles 'jdk-signatures'
}
diff --git a/client/test/build.gradle b/client/test/build.gradle
index 18eb16883ab1..9ee222b036cd 100644
--- a/client/test/build.gradle
+++ b/client/test/build.gradle
@@ -40,8 +40,11 @@ tasks.named('forbiddenApisMain').configure {
tasks.named('forbiddenApisTest').configure {
//we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage
- bundledSignatures -= 'jdk-non-portable'
- bundledSignatures += 'jdk-internal'
+ modifyBundledSignatures { bundledSignatures ->
+ bundledSignatures -= 'jdk-non-portable'
+ bundledSignatures += 'jdk-internal'
+ bundledSignatures
+ }
//client does not depend on core, so only jdk signatures should be checked
replaceSignatureFiles 'jdk-signatures'
}
diff --git a/distribution/tools/cli-launcher/src/main/java/org/elasticsearch/launcher/CliToolLauncher.java b/distribution/tools/cli-launcher/src/main/java/org/elasticsearch/launcher/CliToolLauncher.java
index 700c5bc2b321..4fd2512f2cbb 100644
--- a/distribution/tools/cli-launcher/src/main/java/org/elasticsearch/launcher/CliToolLauncher.java
+++ b/distribution/tools/cli-launcher/src/main/java/org/elasticsearch/launcher/CliToolLauncher.java
@@ -94,7 +94,7 @@ static Thread createShutdownHook(Terminal terminal, Closeable closeable) {
e.printStackTrace(terminal.getErrorWriter());
}
terminal.flush(); // make sure to flush whatever the close or error might have written
- });
+ }, "elasticsearch-cli-shutdown");
}
diff --git a/distribution/tools/server-cli/build.gradle b/distribution/tools/server-cli/build.gradle
index 3ab5e6e86f5b..623f9d40cd49 100644
--- a/distribution/tools/server-cli/build.gradle
+++ b/distribution/tools/server-cli/build.gradle
@@ -5,7 +5,7 @@
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
-import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis
+import org.elasticsearch.gradle.internal.precommit.CheckForbiddenApisTask
apply plugin: 'elasticsearch.build'
@@ -20,7 +20,7 @@ tasks.named("test").configure {
systemProperty "tests.security.manager", "false"
}
-tasks.withType(CheckForbiddenApis).configureEach {
+tasks.withType(CheckForbiddenApisTask).configureEach {
replaceSignatureFiles 'jdk-signatures'
}
diff --git a/docs/changelog/101426.yaml b/docs/changelog/101426.yaml
new file mode 100644
index 000000000000..f9053ba1c1ec
--- /dev/null
+++ b/docs/changelog/101426.yaml
@@ -0,0 +1,5 @@
+pr: 101426
+summary: Add undesired shard count
+area: Allocation
+type: enhancement
+issues: []
diff --git a/docs/changelog/101629.yaml b/docs/changelog/101629.yaml
new file mode 100644
index 000000000000..1b8691c9798f
--- /dev/null
+++ b/docs/changelog/101629.yaml
@@ -0,0 +1,5 @@
+pr: 101629
+summary: Health report infrastructure doesn't trip the circuit breakers
+area: Health
+type: bug
+issues: []
diff --git a/docs/changelog/101648.yaml b/docs/changelog/101648.yaml
new file mode 100644
index 000000000000..48e01739aabc
--- /dev/null
+++ b/docs/changelog/101648.yaml
@@ -0,0 +1,6 @@
+pr: 101648
+summary: "ESQL: Fix unreleased block in topn"
+area: ES|QL
+type: bug
+issues:
+ - 101588
diff --git a/docs/changelog/101652.yaml b/docs/changelog/101652.yaml
new file mode 100644
index 000000000000..79e3167696ae
--- /dev/null
+++ b/docs/changelog/101652.yaml
@@ -0,0 +1,5 @@
+pr: 101652
+summary: Fix race condition in `SnapshotsService`
+area: Snapshot/Restore
+type: bug
+issues: []
diff --git a/docs/changelog/101713.yaml b/docs/changelog/101713.yaml
new file mode 100644
index 000000000000..c3addf929658
--- /dev/null
+++ b/docs/changelog/101713.yaml
@@ -0,0 +1,5 @@
+pr: 101713
+summary: Disable `weight_matches` when kNN query is present
+area: Highlighting
+type: bug
+issues: []
diff --git a/docs/changelog/98916.yaml b/docs/changelog/98916.yaml
new file mode 100644
index 000000000000..a466e3deba00
--- /dev/null
+++ b/docs/changelog/98916.yaml
@@ -0,0 +1,5 @@
+pr: 98916
+summary: Make knn search a query
+area: Vector Search
+type: feature
+issues: []
diff --git a/docs/painless/painless-contexts/painless-reindex-context.asciidoc b/docs/painless/painless-contexts/painless-reindex-context.asciidoc
index 13b216bac634..9aae1ae70c5a 100644
--- a/docs/painless/painless-contexts/painless-reindex-context.asciidoc
+++ b/docs/painless/painless-contexts/painless-reindex-context.asciidoc
@@ -19,7 +19,7 @@ reindexed into a target index.
{ref}/mapping-index-field.html[`ctx['_index']`] (`String`)::
The name of the index.
-{ref}/mapping-id-field.html[`ctx['_id']`] (`int`, read-only)::
+{ref}/mapping-id-field.html[`ctx['_id']`] (`String`)::
The unique document id.
`ctx['_version']` (`int`)::
diff --git a/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc b/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc
index d8f9d4d7bae7..78a8b8d36d6b 100644
--- a/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc
+++ b/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc
@@ -20,7 +20,7 @@ result of query.
{ref}/mapping-index-field.html[`ctx['_index']`] (`String`, read-only)::
The name of the index.
-{ref}/mapping-id-field.html[`ctx['_id']`] (`int`, read-only)::
+{ref}/mapping-id-field.html[`ctx['_id']`] (`String`, read-only)::
The unique document id.
`ctx['_version']` (`int`, read-only)::
diff --git a/docs/painless/painless-contexts/painless-update-context.asciidoc b/docs/painless/painless-contexts/painless-update-context.asciidoc
index f9ae3434827d..53b1008cfebf 100644
--- a/docs/painless/painless-contexts/painless-update-context.asciidoc
+++ b/docs/painless/painless-contexts/painless-update-context.asciidoc
@@ -18,7 +18,7 @@ add, modify, or delete fields within a single document.
{ref}/mapping-index-field.html[`ctx['_index']`] (`String`, read-only)::
The name of the index.
-{ref}/mapping-id-field.html[`ctx['_id']`] (`int`, read-only)::
+{ref}/mapping-id-field.html[`ctx['_id']`] (`String`, read-only)::
The unique document id.
`ctx['_version']` (`int`, read-only)::
diff --git a/docs/reference/cluster/get-desired-balance.asciidoc b/docs/reference/cluster/get-desired-balance.asciidoc
index bd99f1d737bd..2628b5abca9f 100644
--- a/docs/reference/cluster/get-desired-balance.asciidoc
+++ b/docs/reference/cluster/get-desired-balance.asciidoc
@@ -6,7 +6,12 @@
NOTE: {cloud-only}
-Exposes the desired balance and basic metrics.
+Exposes:
+* the desired balance computation and reconciliation stats
+* balancing stats such as distribution of shards, disk and ingest forecasts
+ across nodes and data tiers (based on the current cluster state)
+* routing table with each shard current and desired location
+* cluster info with nodes disk usages
[[get-desired-balance-request]]
==== {api-request-title}
@@ -33,6 +38,8 @@ The API returns the following result:
"reconciliation_time_in_millis": 0
},
"cluster_balance_stats" : {
+ "shard_count": 37,
+ "undesired_shard_allocation_count": 0,
"tiers": {
"data_hot" : {
"shard_count" : {
@@ -42,6 +49,13 @@ The API returns the following result:
"average" : 2.3333333333333335,
"std_dev" : 0.4714045207910317
},
+ "undesired_shard_allocation_count" : {
+ "total" : 0.0,
+ "min" : 0.0,
+ "max" : 0.0,
+ "average" : 0.0,
+ "std_dev" : 0.0
+ },
"forecast_write_load" : {
"total" : 21.0,
"min" : 6.0,
@@ -72,6 +86,13 @@ The API returns the following result:
"average" : 1.0,
"std_dev" : 0.0
},
+ "undesired_shard_allocation_count" : {
+ "total" : 0.0,
+ "min" : 0.0,
+ "max" : 0.0,
+ "average" : 0.0,
+ "std_dev" : 0.0
+ },
"forecast_write_load" : {
"total" : 0.0,
"min" : 0.0,
@@ -100,6 +121,7 @@ The API returns the following result:
"node_id": "UPYt8VwWTt-IADAEbqpLxA",
"roles": ["data_content"],
"shard_count": 10,
+ "undesired_shard_allocation_count": 0,
"forecast_write_load": 8.5,
"forecast_disk_usage_bytes": 498435,
"actual_disk_usage_bytes": 498435
@@ -108,6 +130,7 @@ The API returns the following result:
"node_id": "bgC66tboTIeFQ0VgRGI4Gg",
"roles": ["data_content"],
"shard_count": 15,
+ "undesired_shard_allocation_count": 0,
"forecast_write_load": 3.25,
"forecast_disk_usage_bytes": 384935,
"actual_disk_usage_bytes": 384935
@@ -116,6 +139,7 @@ The API returns the following result:
"node_id": "2x1VTuSOQdeguXPdN73yRw",
"roles": ["data_content"],
"shard_count": 12,
+ "undesired_shard_allocation_count": 0,
"forecast_write_load": 6.0,
"forecast_disk_usage_bytes": 648766,
"actual_disk_usage_bytes": 648766
diff --git a/docs/reference/eql/syntax.asciidoc b/docs/reference/eql/syntax.asciidoc
index f592610f487c..33a6fb745ac5 100644
--- a/docs/reference/eql/syntax.asciidoc
+++ b/docs/reference/eql/syntax.asciidoc
@@ -243,7 +243,7 @@ my_field like ("Value-*", "VALUE2", "VAL?") // case-sensitive
my_field like~ ("value-*", "value2", "val?") // case-insensitive
my_field regex ("[vV]alue-[0-9]", "VALUE[^2].?", "VAL3") // case-sensitive
-my_field regex~ ("value-[0-9]", "value[^2].?", "val3") // case-sensitive
+my_field regex~ ("value-[0-9]", "value[^2].?", "val3") // case-insensitive
----
`in` (case-sensitive)::
diff --git a/docs/reference/inference/delete-inference.asciidoc b/docs/reference/inference/delete-inference.asciidoc
index 874bfa64d355..c9c3e1645861 100644
--- a/docs/reference/inference/delete-inference.asciidoc
+++ b/docs/reference/inference/delete-inference.asciidoc
@@ -2,6 +2,8 @@
[[delete-inference-api]]
=== Delete {infer} API
+experimental[]
+
Deletes an {infer} model deployment.
diff --git a/docs/reference/inference/get-inference.asciidoc b/docs/reference/inference/get-inference.asciidoc
index bbf1d59c5621..b81f2663ec9e 100644
--- a/docs/reference/inference/get-inference.asciidoc
+++ b/docs/reference/inference/get-inference.asciidoc
@@ -2,6 +2,8 @@
[[get-inference-api]]
=== Get {infer} API
+experimental[]
+
Retrieves {infer} model information.
[discrete]
diff --git a/docs/reference/inference/inference-apis.asciidoc b/docs/reference/inference/inference-apis.asciidoc
index ec1f01bc4d09..0476ac57287d 100644
--- a/docs/reference/inference/inference-apis.asciidoc
+++ b/docs/reference/inference/inference-apis.asciidoc
@@ -2,6 +2,8 @@
[[inference-apis]]
== {infer-cap} APIs
+experimental[]
+
You can use the following APIs to manage {infer} models and perform {infer}:
* <>
diff --git a/docs/reference/inference/post-inference.asciidoc b/docs/reference/inference/post-inference.asciidoc
index 99dd4a059519..f26a73d09309 100644
--- a/docs/reference/inference/post-inference.asciidoc
+++ b/docs/reference/inference/post-inference.asciidoc
@@ -2,6 +2,8 @@
[[post-inference-api]]
=== Perform inference API
+experimental[]
+
Performs an inference task on an input text by using an {infer} model.
diff --git a/docs/reference/inference/put-inference.asciidoc b/docs/reference/inference/put-inference.asciidoc
index f4737875971c..3b8cd19aded5 100644
--- a/docs/reference/inference/put-inference.asciidoc
+++ b/docs/reference/inference/put-inference.asciidoc
@@ -2,6 +2,8 @@
[[put-inference-api]]
=== Create {infer} API
+experimental[]
+
Creates a model to perform an {infer} task.
diff --git a/docs/reference/query-dsl/knn-query.asciidoc b/docs/reference/query-dsl/knn-query.asciidoc
new file mode 100644
index 000000000000..f9cc31748ef7
--- /dev/null
+++ b/docs/reference/query-dsl/knn-query.asciidoc
@@ -0,0 +1,222 @@
+[[query-dsl-knn-query]]
+=== Knn query
+++++
+Knn
+++++
+
+Finds the _k_ nearest vectors to a query vector, as measured by a similarity
+metric. _knn_ query finds nearest vectors through approximate search on indexed
+dense_vectors. The preferred way to do approximate kNN search is through the
+<> of a search request. _knn_ query is reserved for
+expert cases, where there is a need to combine this query with other queries.
+
+[[knn-query-ex-request]]
+==== Example request
+
+[source,console]
+----
+PUT my-image-index
+{
+ "mappings": {
+ "properties": {
+ "image-vector": {
+ "type": "dense_vector",
+ "dims": 3,
+ "index": true,
+ "similarity": "l2_norm"
+ },
+ "file-type": {
+ "type": "keyword"
+ }
+ }
+ }
+}
+----
+
+. Index your data.
++
+[source,console]
+----
+POST my-image-index/_bulk?refresh=true
+{ "index": { "_id": "1" } }
+{ "image-vector": [1, 5, -20], "file-type": "jpg" }
+{ "index": { "_id": "2" } }
+{ "image-vector": [42, 8, -15], "file-type": "png" }
+{ "index": { "_id": "3" } }
+{ "image-vector": [15, 11, 23], "file-type": "jpg" }
+----
+//TEST[continued]
+
+. Run the search using the `knn` query, asking for the top 3 nearest vectors.
++
+[source,console]
+----
+POST my-image-index/_search
+{
+ "size" : 3,
+ "query" : {
+ "knn": {
+ "field": "image-vector",
+ "query_vector": [-5, 9, -12],
+ "num_candidates": 10
+ }
+ }
+}
+----
+//TEST[continued]
+
+NOTE: `knn` query doesn't have a separate `k` parameter. `k` is defined by
+`size` parameter of a search request similar to other queries. `knn` query
+collects `num_candidates` results from each shard, then merges them to get
+the top `size` results.
+
+
+[[knn-query-top-level-parameters]]
+==== Top-level parameters for `knn`
+
+`field`::
++
+--
+(Required, string) The name of the vector field to search against. Must be a
+<>.
+--
+
+`query_vector`::
++
+--
+(Required, array of floats) Query vector. Must have the same number of dimensions
+as the vector field you are searching against.
+--
+
+`num_candidates`::
++
+--
+(Required, integer) The number of nearest neighbor candidates to consider per shard.
+Cannot exceed 10,000. {es} collects `num_candidates` results from each shard, then
+merges them to find the top results. Increasing `num_candidates` tends to improve the
+accuracy of the final results.
+--
+
+`filter`::
++
+--
+(Optional, query object) Query to filter the documents that can match.
+The kNN search will return the top documents that also match this filter.
+The value can be a single query or a list of queries. If `filter` is not provided,
+all documents are allowed to match.
+
+The filter is a pre-filter, meaning that it is applied **during** the approximate
+kNN search to ensure that `num_candidates` matching documents are returned.
+--
+
+`similarity`::
++
+--
+(Optional, float) The minimum similarity required for a document to be considered
+a match. The similarity value calculated relates to the raw
+<> used. Not the document score. The matched
+documents are then scored according to <>
+and the provided `boost` is applied.
+--
+
+`boost`::
++
+--
+(Optional, float) Floating point number used to multiply the
+scores of matched documents. This value cannot be negative. Defaults to `1.0`.
+--
+
+`_name`::
++
+--
+(Optional, string) A name for the query, used to identify it in the response.
+--
+
+[[knn-query-filtering]]
+==== Pre-filters and post-filters in knn query
+
+There are two ways to filter documents that match a kNN query:
+
+. **pre-filtering** – filter is applied during the approximate kNN search
+to ensure that `k` matching documents are returned.
+. **post-filtering** – filter is applied after the approximate kNN search
+completes, which may result in fewer than k results, even when there are enough
+matching documents.
+
+Pre-filtering is supported through the `filter` parameter of the `knn` query.
+Also filters from <> are applied as pre-filters.
+
+All other filters found in the Query DSL tree are applied as post-filters.
+For example, `knn` query finds the top 3 documents with the nearest vectors
+(num_candidates=3), which are combined with `term` filter, that is
+post-filtered. The final set of documents will contain only a single document
+that passes the post-filter.
+
+
+[source,console]
+----
+POST my-image-index/_search
+{
+ "size" : 10,
+ "query" : {
+ "bool" : {
+ "must" : {
+ "knn": {
+ "field": "image-vector",
+ "query_vector": [-5, 9, -12],
+ "num_candidates": 3
+ }
+ },
+ "filter" : {
+ "term" : { "file-type" : "png" }
+ }
+ }
+ }
+}
+----
+//TEST[continued]
+
+[[knn-query-with-nested-query]]
+==== Knn query inside a nested query
+
+`knn` query can be used inside a nested query. The behavior here is similar
+to <>:
+
+* kNN search over nested dense_vectors diversifies the top results over
+the top-level document
+* `filter` over the top-level document metadata is supported and acts as a
+post-filter
+* `filter` over `nested` field metadata is not supported
+
+A sample query might look like the following:
+
+[source,js]
+----
+{
+ "query" : {
+ "nested" : {
+ "path" : "paragraph",
+ "query" : {
+ "knn": {
+ "query_vector": [
+ 0.45,
+ 45
+ ],
+ "field": "paragraph.vector",
+ "num_candidates": 2
+ }
+ }
+ }
+ }
+}
+----
+// NOTCONSOLE
+
+[[knn-query-aggregations]]
+==== Knn query with aggregations
+`knn` query calculates aggregations on `num_candidates` from each shard.
+Thus, the final results from aggregations contain
+`num_candidates * number_of_shards` documents. This is different from
+the <> where aggregations are
+calculated on the global top k nearest documents.
+
diff --git a/docs/reference/query-dsl/special-queries.asciidoc b/docs/reference/query-dsl/special-queries.asciidoc
index a6d35d4f9b70..d46377f69835 100644
--- a/docs/reference/query-dsl/special-queries.asciidoc
+++ b/docs/reference/query-dsl/special-queries.asciidoc
@@ -17,6 +17,10 @@ or collection of documents.
This query finds queries that are stored as documents that match with
the specified document.
+<>::
+A query that finds the _k_ nearest vectors to a query
+vector, as measured by a similarity metric.
+
<>::
A query that computes scores based on the values of numeric features and is
able to efficiently skip non-competitive hits.
@@ -43,6 +47,8 @@ include::mlt-query.asciidoc[]
include::percolate-query.asciidoc[]
+include::knn-query.asciidoc[]
+
include::rank-feature-query.asciidoc[]
include::script-query.asciidoc[]
diff --git a/docs/reference/search/search-your-data/knn-search.asciidoc b/docs/reference/search/search-your-data/knn-search.asciidoc
index 8c676a5515ca..4bf1ceabe08d 100644
--- a/docs/reference/search/search-your-data/knn-search.asciidoc
+++ b/docs/reference/search/search-your-data/knn-search.asciidoc
@@ -43,7 +43,7 @@ based on a similarity metric, the better its match.
{es} supports two methods for kNN search:
* <> using the `knn` search
-option
+option or `knn` query
* <> using a `script_score` query with a
vector function
@@ -129,7 +129,8 @@ POST image-index/_bulk?refresh=true
//TEST[continued]
//TEST[s/\.\.\.//]
-. Run the search using the <>.
+. Run the search using the <> or the
+<> (expert case).
+
[source,console]
----
diff --git a/docs/reference/settings/data-stream-lifecycle-settings.asciidoc b/docs/reference/settings/data-stream-lifecycle-settings.asciidoc
index 8c3f4c793e5e..023a8fcf860e 100644
--- a/docs/reference/settings/data-stream-lifecycle-settings.asciidoc
+++ b/docs/reference/settings/data-stream-lifecycle-settings.asciidoc
@@ -51,6 +51,17 @@ segment size is a way to prevent indices from having a long tail of very small s
This setting controls what value does <>
configures on the target index. It defaults to `100MB`.
+[[data-streams-lifecycle-signalling-error-retry-interval]]
+`data_streams.lifecycle.signalling.error_retry_interval`::
+(<>, integer)
+Represents the number of retries data stream lifecycle has to perform for an index
+in an error step in order to signal that the index is not progressing (i.e. it's
+stuck in an error step).
+The current signalling mechanism is a log statement at the `error` level; however,
+the signalling mechanism can be extended in the future.
+Defaults to 10 retries.
+
+
==== Index level settings
The following index-level settings are typically configured on the backing indices of a data stream.
diff --git a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationLoaderTests.java b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationLoaderTests.java
index 61d42e5db708..5ec0d129b8f9 100644
--- a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationLoaderTests.java
+++ b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationLoaderTests.java
@@ -29,9 +29,8 @@
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
-public class SslConfigurationLoaderTests extends ESTestCase {
+public final class SslConfigurationLoaderTests extends ESTestCase {
- @SuppressWarnings("this-escape")
private final Path certRoot = getDataPath("/certs/ca1/ca.crt").getParent().getParent();
private Settings settings;
diff --git a/libs/tdigest/src/main/java/org/elasticsearch/tdigest/Centroid.java b/libs/tdigest/src/main/java/org/elasticsearch/tdigest/Centroid.java
index fe9b1f673f71..37bdf37ce51a 100644
--- a/libs/tdigest/src/main/java/org/elasticsearch/tdigest/Centroid.java
+++ b/libs/tdigest/src/main/java/org/elasticsearch/tdigest/Centroid.java
@@ -26,7 +26,7 @@
/**
* A single centroid which represents a number of data points.
*/
-public class Centroid implements Comparable {
+public final class Centroid implements Comparable {
private static final AtomicInteger uniqueCount = new AtomicInteger(1);
private double centroid = 0;
@@ -40,19 +40,16 @@ private Centroid() {
id = uniqueCount.getAndIncrement();
}
- @SuppressWarnings("this-escape")
public Centroid(double x) {
this();
start(x, 1, uniqueCount.getAndIncrement());
}
- @SuppressWarnings("this-escape")
public Centroid(double x, long w) {
this();
start(x, w, uniqueCount.getAndIncrement());
}
- @SuppressWarnings("this-escape")
public Centroid(double x, long w, int id) {
this();
start(x, w, id);
diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java
index 57649f7e3dfa..07bbc5c55f7c 100644
--- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java
+++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java
@@ -52,7 +52,7 @@ public class APMMeterRegistry implements MeterRegistry {
private final Registrar longGauges = new Registrar<>();
private final Registrar longHistograms = new Registrar<>();
- private final Meter meter;
+ private Meter meter;
public APMMeterRegistry(Meter meter) {
this.meter = meter;
@@ -170,8 +170,9 @@ public LongHistogram getLongHistogram(String name) {
public void setProvider(Meter meter) {
try (ReleasableLock lock = registerLock.acquire()) {
+ this.meter = meter;
for (Registrar> registrar : registrars) {
- registrar.setProvider(meter);
+ registrar.setProvider(this.meter);
}
}
}
diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java
index 61b53f2087f6..2a806ca19a4e 100644
--- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java
+++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java
@@ -25,6 +25,7 @@
* @param delegated instrument
*/
public abstract class AbstractInstrument implements Instrument {
+ private static final int MAX_NAME_LENGTH = 63; // TODO(stu): change to 255 when we upgrade to otel 1.30+, see #101679
private final AtomicReference delegate;
private final String name;
private final String description;
@@ -33,6 +34,11 @@ public abstract class AbstractInstrument implements Instrument {
@SuppressWarnings("this-escape")
public AbstractInstrument(Meter meter, String name, String description, String unit) {
this.name = Objects.requireNonNull(name);
+ if (name.length() > MAX_NAME_LENGTH) {
+ throw new IllegalArgumentException(
+ "Instrument name [" + name + "] with length [" + name.length() + "] exceeds maximum length [" + MAX_NAME_LENGTH + "]"
+ );
+ }
this.description = Objects.requireNonNull(description);
this.unit = Objects.requireNonNull(unit);
this.delegate = new AtomicReference<>(doBuildInstrument(meter));
diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java
index 38fb0f0e0a8a..b393edd6e58e 100644
--- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java
+++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java
@@ -16,15 +16,20 @@
import org.elasticsearch.telemetry.apm.internal.APMMeterService;
import org.elasticsearch.telemetry.apm.internal.TestAPMMeterService;
import org.elasticsearch.telemetry.metric.DoubleCounter;
+import org.elasticsearch.telemetry.metric.LongCounter;
import org.elasticsearch.test.ESTestCase;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.sameInstance;
public class APMMeterRegistryTests extends ESTestCase {
- Meter testOtel = OpenTelemetry.noop().getMeter("test");
+ Meter testOtel = new RecordingOtelMeter();
Meter noopOtel = OpenTelemetry.noop().getMeter("noop");
+ private Settings TELEMETRY_ENABLED = Settings.builder().put(APMAgentSettings.TELEMETRY_METRICS_ENABLED_SETTING.getKey(), true).build();
+
public void testMeterIsSetUponConstruction() {
// test default
APMMeterService apmMeter = new APMMeterService(Settings.EMPTY, () -> testOtel, () -> noopOtel);
@@ -33,14 +38,13 @@ public void testMeterIsSetUponConstruction() {
assertThat(meter, sameInstance(noopOtel));
// test explicitly enabled
- var settings = Settings.builder().put(APMAgentSettings.TELEMETRY_METRICS_ENABLED_SETTING.getKey(), true).build();
- apmMeter = new APMMeterService(settings, () -> testOtel, () -> noopOtel);
+ apmMeter = new APMMeterService(TELEMETRY_ENABLED, () -> testOtel, () -> noopOtel);
meter = apmMeter.getMeterRegistry().getMeter();
assertThat(meter, sameInstance(testOtel));
// test explicitly disabled
- settings = Settings.builder().put(APMAgentSettings.TELEMETRY_METRICS_ENABLED_SETTING.getKey(), true).build();
+ var settings = Settings.builder().put(APMAgentSettings.TELEMETRY_METRICS_ENABLED_SETTING.getKey(), false).build();
apmMeter = new APMMeterService(settings, () -> testOtel, () -> noopOtel);
meter = apmMeter.getMeterRegistry().getMeter();
@@ -60,9 +64,7 @@ public void testMeterIsOverridden() {
}
public void testLookupByName() {
- var settings = Settings.builder().put(APMAgentSettings.TELEMETRY_METRICS_ENABLED_SETTING.getKey(), true).build();
-
- var apmMeter = new APMMeterService(settings, () -> testOtel, () -> noopOtel).getMeterRegistry();
+ var apmMeter = new APMMeterService(TELEMETRY_ENABLED, () -> testOtel, () -> noopOtel).getMeterRegistry();
DoubleCounter registeredCounter = apmMeter.registerDoubleCounter("name", "desc", "unit");
DoubleCounter lookedUpCounter = apmMeter.getDoubleCounter("name");
@@ -71,8 +73,7 @@ public void testLookupByName() {
}
public void testNoopIsSetOnStop() {
- var settings = Settings.builder().put(APMAgentSettings.TELEMETRY_METRICS_ENABLED_SETTING.getKey(), true).build();
- APMMeterService apmMeter = new APMMeterService(settings, () -> testOtel, () -> noopOtel);
+ APMMeterService apmMeter = new APMMeterService(TELEMETRY_ENABLED, () -> testOtel, () -> noopOtel);
apmMeter.start();
Meter meter = apmMeter.getMeterRegistry().getMeter();
@@ -84,4 +85,16 @@ public void testNoopIsSetOnStop() {
assertThat(meter, sameInstance(noopOtel));
}
+ public void testMaxNameLength() {
+ APMMeterService apmMeter = new APMMeterService(TELEMETRY_ENABLED, () -> testOtel, () -> noopOtel);
+ apmMeter.start();
+ int max_length = 63;
+ var counter = apmMeter.getMeterRegistry().registerLongCounter("a".repeat(max_length), "desc", "count");
+ assertThat(counter, instanceOf(LongCounter.class));
+ IllegalArgumentException iae = expectThrows(
+ IllegalArgumentException.class,
+ () -> apmMeter.getMeterRegistry().registerLongCounter("a".repeat(max_length + 1), "desc", "count")
+ );
+ assertThat(iae.getMessage(), containsString("exceeds maximum length [63]"));
+ }
}
diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java
index 6a3ee1feb73f..73669ccacdbc 100644
--- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java
+++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java
@@ -39,7 +39,7 @@
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-public class CustomMustacheFactory extends DefaultMustacheFactory {
+public final class CustomMustacheFactory extends DefaultMustacheFactory {
static final String V7_JSON_MEDIA_TYPE_WITH_CHARSET = "application/json; charset=UTF-8";
static final String JSON_MEDIA_TYPE_WITH_CHARSET = "application/json;charset=utf-8";
static final String JSON_MEDIA_TYPE = "application/json";
@@ -63,7 +63,6 @@ public class CustomMustacheFactory extends DefaultMustacheFactory {
private final Encoder encoder;
- @SuppressWarnings("this-escape")
public CustomMustacheFactory(String mediaType) {
super();
setObjectHandler(new CustomReflectionObjectHandler());
diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateActionTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateActionTests.java
index c234ea58c7ea..fffa5295522a 100644
--- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateActionTests.java
+++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateActionTests.java
@@ -23,8 +23,7 @@
import java.util.List;
import java.util.Map;
-public class RestMultiSearchTemplateActionTests extends RestActionTestCase {
- @SuppressWarnings("this-escape")
+public final class RestMultiSearchTemplateActionTests extends RestActionTestCase {
final List contentTypeHeader = Collections.singletonList(compatibleMediaType(XContentType.VND_JSON, RestApiVersion.V_7));
@Before
diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/RestSearchTemplateActionTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/RestSearchTemplateActionTests.java
index bc0a5f87e25d..4e30d87b6a17 100644
--- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/RestSearchTemplateActionTests.java
+++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/RestSearchTemplateActionTests.java
@@ -21,8 +21,7 @@
import java.util.List;
import java.util.Map;
-public class RestSearchTemplateActionTests extends RestActionTestCase {
- @SuppressWarnings("this-escape")
+public final class RestSearchTemplateActionTests extends RestActionTestCase {
final List contentTypeHeader = Collections.singletonList(randomCompatibleMediaType(RestApiVersion.V_7));
@Before
diff --git a/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java b/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java
index 9362080c9cb3..cad976411b8d 100644
--- a/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java
+++ b/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java
@@ -9,6 +9,7 @@
import org.apache.lucene.search.join.ScoreMode;
import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.MultiSearchResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.WriteRequest;
@@ -22,10 +23,12 @@
import org.elasticsearch.index.query.MatchPhraseQueryBuilder;
import org.elasticsearch.index.query.MultiMatchQueryBuilder;
import org.elasticsearch.index.query.Operator;
+import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.search.vectors.KnnVectorQueryBuilder;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.xcontent.XContentBuilder;
import org.elasticsearch.xcontent.XContentFactory;
@@ -1295,4 +1298,34 @@ public void testWithWildcardFieldNames() throws Exception {
).get();
assertEquals(1, response.getHits().getTotalHits().value);
}
+
+ public void testKnnQueryNotSupportedInPercolator() throws IOException {
+ String mappings = org.elasticsearch.common.Strings.format("""
+ {
+ "properties": {
+ "my_query" : {
+ "type" : "percolator"
+ },
+ "my_vector" : {
+ "type" : "dense_vector",
+ "dims" : 5,
+ "index" : true,
+ "similarity" : "l2_norm"
+ }
+
+ }
+ }
+ """);
+ indicesAdmin().prepareCreate("index1").setMapping(mappings).get();
+ ensureGreen();
+ QueryBuilder knnVectorQueryBuilder = new KnnVectorQueryBuilder("my_vector", new float[] { 1, 1, 1, 1, 1 }, 10, null);
+
+ IndexRequestBuilder indexRequestBuilder = client().prepareIndex("index1")
+ .setId("knn_query1")
+ .setSource(jsonBuilder().startObject().field("my_query", knnVectorQueryBuilder).endObject());
+
+ DocumentParsingException exception = expectThrows(DocumentParsingException.class, () -> indexRequestBuilder.get());
+ assertThat(exception.getMessage(), containsString("the [knn] query is unsupported inside a percolator"));
+ }
+
}
diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java
index c00eaa894dd6..e21226428793 100644
--- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java
+++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java
@@ -61,6 +61,7 @@
import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.index.query.Rewriteable;
import org.elasticsearch.index.query.SearchExecutionContext;
+import org.elasticsearch.search.vectors.KnnVectorQueryBuilder;
import org.elasticsearch.xcontent.XContentParser;
import java.io.ByteArrayOutputStream;
@@ -438,6 +439,8 @@ static QueryBuilder parseQueryBuilder(DocumentParserContext context) {
throw new IllegalArgumentException("the [has_child] query is unsupported inside a percolator query");
} else if (queryName.equals("has_parent")) {
throw new IllegalArgumentException("the [has_parent] query is unsupported inside a percolator query");
+ } else if (queryName.equals(KnnVectorQueryBuilder.NAME)) {
+ throw new IllegalArgumentException("the [knn] query is unsupported inside a percolator query");
}
});
} catch (IOException e) {
diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java
index 15f9798abe88..ce63bcba0345 100644
--- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java
+++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java
@@ -26,7 +26,7 @@
/**
* Request to perform a search ranking evaluation.
*/
-public class RankEvalRequest extends ActionRequest implements IndicesRequest.Replaceable {
+public final class RankEvalRequest extends ActionRequest implements IndicesRequest.Replaceable {
private RankEvalSpec rankingEvaluationSpec;
@@ -35,7 +35,6 @@ public class RankEvalRequest extends ActionRequest implements IndicesRequest.Rep
private SearchType searchType = SearchType.DEFAULT;
- @SuppressWarnings("this-escape")
public RankEvalRequest(RankEvalSpec rankingEvaluationSpec, String[] indices) {
this.rankingEvaluationSpec = Objects.requireNonNull(rankingEvaluationSpec, "ranking evaluation specification must not be null");
indices(indices);
diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/TransportRankEvalActionTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/TransportRankEvalActionTests.java
index f99a22cbac6e..982d1afcf6dd 100644
--- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/TransportRankEvalActionTests.java
+++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/TransportRankEvalActionTests.java
@@ -30,10 +30,9 @@
import static org.mockito.Mockito.mock;
-public class TransportRankEvalActionTests extends ESTestCase {
+public final class TransportRankEvalActionTests extends ESTestCase {
- @SuppressWarnings("this-escape")
- private Settings settings = Settings.builder()
+ private final Settings settings = Settings.builder()
.put("path.home", createTempDir().toString())
.put("node.name", "test-" + getTestName())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/RestDeleteByQueryActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/RestDeleteByQueryActionTests.java
index 8e1cfb309a67..fdd98992503d 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/reindex/RestDeleteByQueryActionTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/RestDeleteByQueryActionTests.java
@@ -23,9 +23,8 @@
import java.util.List;
import java.util.Map;
-public class RestDeleteByQueryActionTests extends RestActionTestCase {
+public final class RestDeleteByQueryActionTests extends RestActionTestCase {
- @SuppressWarnings("this-escape")
final List contentTypeHeader = Collections.singletonList(compatibleMediaType(XContentType.VND_JSON, RestApiVersion.V_7));
@Before
diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/RestUpdateByQueryActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/RestUpdateByQueryActionTests.java
index 7222b5efe9c8..889c8d0091c8 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/reindex/RestUpdateByQueryActionTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/RestUpdateByQueryActionTests.java
@@ -23,9 +23,8 @@
import java.util.List;
import java.util.Map;
-public class RestUpdateByQueryActionTests extends RestActionTestCase {
+public final class RestUpdateByQueryActionTests extends RestActionTestCase {
- @SuppressWarnings("this-escape")
final List contentTypeHeader = Collections.singletonList(compatibleMediaType(XContentType.VND_JSON, RestApiVersion.V_7));
@Before
diff --git a/qa/ccs-rolling-upgrade-remote-cluster/src/test/java/org/elasticsearch/upgrades/SearchStatesIT.java b/qa/ccs-rolling-upgrade-remote-cluster/src/test/java/org/elasticsearch/upgrades/SearchStatesIT.java
index 6d89571e5af9..1bb2116cc680 100644
--- a/qa/ccs-rolling-upgrade-remote-cluster/src/test/java/org/elasticsearch/upgrades/SearchStatesIT.java
+++ b/qa/ccs-rolling-upgrade-remote-cluster/src/test/java/org/elasticsearch/upgrades/SearchStatesIT.java
@@ -12,14 +12,12 @@
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
-import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Request;
-import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
-import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
import org.elasticsearch.test.rest.ESRestTestCase;
@@ -44,7 +42,6 @@
* This test ensure that we keep the search states of a CCS request correctly when the local and remote clusters
* have different but compatible versions. See SearchService#createAndPutReaderContext
*/
-@SuppressWarnings("removal")
public class SearchStatesIT extends ESRestTestCase {
private static final Logger LOGGER = LogManager.getLogger(SearchStatesIT.class);
@@ -90,7 +87,7 @@ static List parseHosts(String props) {
public static void configureRemoteClusters(List remoteNodes) throws Exception {
assertThat(remoteNodes, hasSize(3));
final String remoteClusterSettingPrefix = "cluster.remote." + CLUSTER_ALIAS + ".";
- try (RestClient localClient = newLocalClient().getLowLevelClient()) {
+ try (RestClient localClient = newLocalClient()) {
final Settings remoteConnectionSettings;
if (randomBoolean()) {
final List seeds = remoteNodes.stream()
@@ -124,28 +121,32 @@ public static void configureRemoteClusters(List remoteNodes) throws Except
}
}
- static RestHighLevelClient newLocalClient() {
+ static RestClient newLocalClient() {
final List hosts = parseHosts("tests.rest.cluster");
final int index = random().nextInt(hosts.size());
LOGGER.info("Using client node {}", index);
- return new RestHighLevelClient(RestClient.builder(hosts.get(index)));
+ return RestClient.builder(hosts.get(index)).build();
}
- static RestHighLevelClient newRemoteClient() {
- return new RestHighLevelClient(RestClient.builder(randomFrom(parseHosts("tests.rest.remote_cluster"))));
+ static RestClient newRemoteClient() {
+ return RestClient.builder(randomFrom(parseHosts("tests.rest.remote_cluster"))).build();
}
- static int indexDocs(RestHighLevelClient client, String index, int numDocs) throws IOException {
+ static int indexDocs(RestClient client, String index, int numDocs) throws IOException {
for (int i = 0; i < numDocs; i++) {
- client.index(new IndexRequest(index).id("id_" + i).source("f", i), RequestOptions.DEFAULT);
+ Request createDoc = new Request("POST", "/" + index + "/_doc/id_" + i);
+ createDoc.setJsonEntity(Strings.format("""
+ { "f": %s }
+ """, i));
+ assertOK(client.performRequest(createDoc));
}
- refresh(client.getLowLevelClient(), index);
+ refresh(client, index);
return numDocs;
}
void verifySearch(String localIndex, int localNumDocs, String remoteIndex, int remoteNumDocs, Integer preFilterShardSize) {
- try (RestClient localClient = newLocalClient().getLowLevelClient()) {
+ try (RestClient localClient = newLocalClient()) {
Request request = new Request("POST", "/_search");
final int expectedDocs;
if (randomBoolean()) {
@@ -185,56 +186,40 @@ void verifySearch(String localIndex, int localNumDocs, String remoteIndex, int r
public void testBWCSearchStates() throws Exception {
String localIndex = "test_bwc_search_states_index";
String remoteIndex = "test_bwc_search_states_remote_index";
- try (RestHighLevelClient localClient = newLocalClient(); RestHighLevelClient remoteClient = newRemoteClient()) {
- createIndex(
- localClient.getLowLevelClient(),
- localIndex,
- Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, between(1, 5)).build()
- );
+ try (RestClient localClient = newLocalClient(); RestClient remoteClient = newRemoteClient()) {
+ createIndex(localClient, localIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, between(1, 5)).build());
int localNumDocs = indexDocs(localClient, localIndex, between(10, 100));
- createIndex(
- remoteClient.getLowLevelClient(),
- remoteIndex,
- Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, between(1, 5)).build()
- );
+ createIndex(remoteClient, remoteIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, between(1, 5)).build());
int remoteNumDocs = indexDocs(remoteClient, remoteIndex, between(10, 100));
- configureRemoteClusters(getNodes(remoteClient.getLowLevelClient()));
+ configureRemoteClusters(getNodes(remoteClient));
int iterations = between(1, 20);
for (int i = 0; i < iterations; i++) {
verifySearch(localIndex, localNumDocs, CLUSTER_ALIAS + ":" + remoteIndex, remoteNumDocs, null);
}
- deleteIndex(localClient.getLowLevelClient(), localIndex);
- deleteIndex(remoteClient.getLowLevelClient(), remoteIndex);
+ deleteIndex(localClient, localIndex);
+ deleteIndex(remoteClient, remoteIndex);
}
}
public void testCanMatch() throws Exception {
String localIndex = "test_can_match_local_index";
String remoteIndex = "test_can_match_remote_index";
- try (RestHighLevelClient localClient = newLocalClient(); RestHighLevelClient remoteClient = newRemoteClient()) {
- createIndex(
- localClient.getLowLevelClient(),
- localIndex,
- Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, between(5, 20)).build()
- );
+ try (RestClient localClient = newLocalClient(); RestClient remoteClient = newRemoteClient()) {
+ createIndex(localClient, localIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, between(5, 20)).build());
int localNumDocs = indexDocs(localClient, localIndex, between(10, 100));
- createIndex(
- remoteClient.getLowLevelClient(),
- remoteIndex,
- Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, between(5, 20)).build()
- );
+ createIndex(remoteClient, remoteIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, between(5, 20)).build());
int remoteNumDocs = indexDocs(remoteClient, remoteIndex, between(10, 100));
- configureRemoteClusters(getNodes(remoteClient.getLowLevelClient()));
+ configureRemoteClusters(getNodes(remoteClient));
int iterations = between(1, 10);
for (int i = 0; i < iterations; i++) {
verifySearch(localIndex, localNumDocs, CLUSTER_ALIAS + ":" + remoteIndex, remoteNumDocs, between(1, 10));
}
- deleteIndex(localClient.getLowLevelClient(), localIndex);
- deleteIndex(remoteClient.getLowLevelClient(), remoteIndex);
+ deleteIndex(localClient, localIndex);
+ deleteIndex(remoteClient, remoteIndex);
}
}
}
diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/Packages.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/Packages.java
index d1fefd425ae7..54f82b2366d1 100644
--- a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/Packages.java
+++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/Packages.java
@@ -310,7 +310,7 @@ public static void restartElasticsearch(Shell sh, Installation installation) thr
* when instantiated, and advancing that cursor when the {@code clear()}
* method is called.
*/
- public static class JournaldWrapper {
+ public static final class JournaldWrapper {
private Shell sh;
private String cursor;
@@ -318,7 +318,6 @@ public static class JournaldWrapper {
* Create a new wrapper for Elasticsearch JournalD logs.
* @param sh A shell with appropriate permissions.
*/
- @SuppressWarnings("this-escape")
public JournaldWrapper(Shell sh) {
this.sh = sh;
clear();
diff --git a/qa/remote-clusters/src/test/java/org/elasticsearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java b/qa/remote-clusters/src/test/java/org/elasticsearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java
index 824f4db5c4cf..d9be4045c37e 100644
--- a/qa/remote-clusters/src/test/java/org/elasticsearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java
+++ b/qa/remote-clusters/src/test/java/org/elasticsearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java
@@ -11,7 +11,6 @@
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;
-import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
@@ -26,10 +25,8 @@
import java.net.URISyntaxException;
import java.nio.file.Files;
import java.nio.file.Path;
-import java.util.Collections;
import java.util.function.Consumer;
-@SuppressWarnings("removal")
public abstract class AbstractMultiClusterRemoteTestCase extends ESRestTestCase {
private static final String USER = "x_pack_rest_user";
@@ -40,8 +37,8 @@ protected boolean preserveClusterUponCompletion() {
return true;
}
- private static RestHighLevelClient cluster1Client;
- private static RestHighLevelClient cluster2Client;
+ private static RestClient cluster1Client;
+ private static RestClient cluster2Client;
private static boolean initialized = false;
@Override
@@ -62,8 +59,8 @@ public void initClientsAndConfigureClusters() throws Exception {
request.addParameter("wait_for_status", "yellow");
request.addParameter("wait_for_nodes", "1");
};
- ensureHealth(cluster1Client().getLowLevelClient(), waitForYellowRequest);
- ensureHealth(cluster2Client().getLowLevelClient(), waitForYellowRequest);
+ ensureHealth(cluster1Client, waitForYellowRequest);
+ ensureHealth(cluster2Client, waitForYellowRequest);
initialized = true;
}
@@ -86,28 +83,22 @@ public static void destroyClients() throws IOException {
}
}
- protected static RestHighLevelClient cluster1Client() {
+ protected static RestClient cluster1Client() {
return cluster1Client;
}
- protected static RestHighLevelClient cluster2Client() {
+ protected static RestClient cluster2Client() {
return cluster2Client;
}
- private static class HighLevelClient extends RestHighLevelClient {
- private HighLevelClient(RestClient restClient) {
- super(restClient, RestClient::close, Collections.emptyList());
- }
- }
-
- private RestHighLevelClient buildClient(final String url) throws IOException {
+ private RestClient buildClient(final String url) throws IOException {
int portSeparator = url.lastIndexOf(':');
HttpHost httpHost = new HttpHost(
url.substring(0, portSeparator),
Integer.parseInt(url.substring(portSeparator + 1)),
getProtocol()
);
- return new HighLevelClient(buildClient(restAdminSettings(), new HttpHost[] { httpHost }));
+ return buildClient(restAdminSettings(), new HttpHost[] { httpHost });
}
protected boolean isOss() {
diff --git a/qa/remote-clusters/src/test/java/org/elasticsearch/cluster/remote/test/RemoteClustersIT.java b/qa/remote-clusters/src/test/java/org/elasticsearch/cluster/remote/test/RemoteClustersIT.java
index 9a0303ab6071..78ffb9cb7b7b 100644
--- a/qa/remote-clusters/src/test/java/org/elasticsearch/cluster/remote/test/RemoteClustersIT.java
+++ b/qa/remote-clusters/src/test/java/org/elasticsearch/cluster/remote/test/RemoteClustersIT.java
@@ -7,13 +7,10 @@
*/
package org.elasticsearch.cluster.remote.test;
-import org.elasticsearch.action.index.IndexRequest;
-import org.elasticsearch.action.search.SearchRequest;
-import org.elasticsearch.action.support.WriteRequest;
-import org.elasticsearch.client.RequestOptions;
+import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.xcontent.XContentFactory;
+import org.elasticsearch.test.rest.ObjectPath;
import org.junit.After;
import org.junit.Before;
@@ -28,44 +25,53 @@ public class RemoteClustersIT extends AbstractMultiClusterRemoteTestCase {
@Before
public void setupIndices() throws IOException {
- RestClient cluster1Client = cluster1Client().getLowLevelClient();
- assertTrue(createIndex(cluster1Client, "test1", Settings.builder().put("index.number_of_replicas", 0).build()).isAcknowledged());
- cluster1Client().index(
- new IndexRequest("test1").id("id1")
- .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
- .source(XContentFactory.jsonBuilder().startObject().field("foo", "bar").endObject()),
- RequestOptions.DEFAULT
- );
-
- RestClient cluster2Client = cluster2Client().getLowLevelClient();
- assertTrue(createIndex(cluster2Client, "test2", Settings.builder().put("index.number_of_replicas", 0).build()).isAcknowledged());
- cluster2Client().index(
- new IndexRequest("test2").id("id1").source(XContentFactory.jsonBuilder().startObject().field("foo", "bar").endObject()),
- RequestOptions.DEFAULT
- );
- cluster2Client().index(
- new IndexRequest("test2").id("id2")
- .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
- .source(XContentFactory.jsonBuilder().startObject().field("foo", "bar").endObject()),
- RequestOptions.DEFAULT
- );
- assertEquals(1L, cluster1Client().search(new SearchRequest("test1"), RequestOptions.DEFAULT).getHits().getTotalHits().value);
- assertEquals(2L, cluster2Client().search(new SearchRequest("test2"), RequestOptions.DEFAULT).getHits().getTotalHits().value);
+ assertTrue(createIndex(cluster1Client(), "test1", Settings.builder().put("index.number_of_replicas", 0).build()).isAcknowledged());
+ {
+ Request createDoc = new Request("POST", "/test1/_doc/id1?refresh=true");
+ createDoc.setJsonEntity("""
+ { "foo": "bar" }
+ """);
+ assertOK(cluster1Client().performRequest(createDoc));
+ }
+ {
+ Request searchRequest = new Request("POST", "/test1/_search");
+ ObjectPath doc = ObjectPath.createFromResponse(cluster1Client().performRequest(searchRequest));
+ assertEquals(1, (int) doc.evaluate("hits.total.value"));
+ }
+
+ assertTrue(createIndex(cluster2Client(), "test2", Settings.builder().put("index.number_of_replicas", 0).build()).isAcknowledged());
+ {
+ Request createDoc = new Request("POST", "/test2/_doc/id1?refresh=true");
+ createDoc.setJsonEntity("""
+ { "foo": "bar" }
+ """);
+ assertOK(cluster2Client().performRequest(createDoc));
+ }
+ {
+ Request createDoc = new Request("POST", "/test2/_doc/id2?refresh=true");
+ createDoc.setJsonEntity("""
+ { "foo": "bar" }
+ """);
+ assertOK(cluster2Client().performRequest(createDoc));
+ }
+ {
+ Request searchRequest = new Request("POST", "/test2/_search");
+ ObjectPath doc = ObjectPath.createFromResponse(cluster2Client().performRequest(searchRequest));
+ assertEquals(2, (int) doc.evaluate("hits.total.value"));
+ }
}
@After
public void clearIndices() throws IOException {
- RestClient cluster1Client = cluster1Client().getLowLevelClient();
- assertTrue(deleteIndex(cluster1Client, "*").isAcknowledged());
- RestClient cluster2Client = cluster2Client().getLowLevelClient();
- assertTrue(deleteIndex(cluster2Client, "*").isAcknowledged());
+ assertTrue(deleteIndex(cluster1Client(), "*").isAcknowledged());
+ assertTrue(deleteIndex(cluster2Client(), "*").isAcknowledged());
}
@After
public void clearRemoteClusterSettings() throws IOException {
Settings setting = Settings.builder().putNull("cluster.remote.*").build();
- updateClusterSettings(cluster1Client().getLowLevelClient(), setting);
- updateClusterSettings(cluster2Client().getLowLevelClient(), setting);
+ updateClusterSettings(cluster1Client(), setting);
+ updateClusterSettings(cluster2Client(), setting);
}
public void testProxyModeConnectionWorks() throws IOException {
@@ -76,14 +82,15 @@ public void testProxyModeConnectionWorks() throws IOException {
.put("cluster.remote.cluster2.proxy_address", cluster2RemoteClusterSeed)
.build();
- updateClusterSettings(cluster1Client().getLowLevelClient(), settings);
+ updateClusterSettings(cluster1Client(), settings);
- assertTrue(isConnected(cluster1Client().getLowLevelClient()));
+ assertTrue(isConnected(cluster1Client()));
- assertEquals(
- 2L,
- cluster1Client().search(new SearchRequest("cluster2:test2"), RequestOptions.DEFAULT).getHits().getTotalHits().value
- );
+ {
+ Request searchRequest = new Request("POST", "/cluster2:test2/_search");
+ ObjectPath doc = ObjectPath.createFromResponse(cluster1Client().performRequest(searchRequest));
+ assertEquals(2, (int) doc.evaluate("hits.total.value"));
+ }
}
public void testSniffModeConnectionFails() throws IOException {
@@ -93,9 +100,9 @@ public void testSniffModeConnectionFails() throws IOException {
.put("cluster.remote.cluster2alt.mode", "sniff")
.put("cluster.remote.cluster2alt.seeds", cluster2RemoteClusterSeed)
.build();
- updateClusterSettings(cluster1Client().getLowLevelClient(), settings);
+ updateClusterSettings(cluster1Client(), settings);
- assertFalse(isConnected(cluster1Client().getLowLevelClient()));
+ assertFalse(isConnected(cluster1Client()));
}
public void testHAProxyModeConnectionWorks() throws IOException {
@@ -105,14 +112,15 @@ public void testHAProxyModeConnectionWorks() throws IOException {
.put("cluster.remote.haproxynosn.mode", "proxy")
.put("cluster.remote.haproxynosn.proxy_address", proxyAddress)
.build();
- updateClusterSettings(cluster1Client().getLowLevelClient(), settings);
+ updateClusterSettings(cluster1Client(), settings);
- assertTrue(isConnected(cluster1Client().getLowLevelClient()));
+ assertTrue(isConnected(cluster1Client()));
- assertEquals(
- 2L,
- cluster1Client().search(new SearchRequest("haproxynosn:test2"), RequestOptions.DEFAULT).getHits().getTotalHits().value
- );
+ {
+ Request searchRequest = new Request("POST", "/haproxynosn:test2/_search");
+ ObjectPath doc = ObjectPath.createFromResponse(cluster1Client().performRequest(searchRequest));
+ assertEquals(2, (int) doc.evaluate("hits.total.value"));
+ }
}
public void testHAProxyModeConnectionWithSNIToCluster1Works() throws IOException {
@@ -123,14 +131,15 @@ public void testHAProxyModeConnectionWithSNIToCluster1Works() throws IOException
.put("cluster.remote.haproxysni1.proxy_address", "haproxy:9600")
.put("cluster.remote.haproxysni1.server_name", "application1.example.com")
.build();
- updateClusterSettings(cluster2Client().getLowLevelClient(), settings);
+ updateClusterSettings(cluster2Client(), settings);
- assertTrue(isConnected(cluster2Client().getLowLevelClient()));
+ assertTrue(isConnected(cluster2Client()));
- assertEquals(
- 1L,
- cluster2Client().search(new SearchRequest("haproxysni1:test1"), RequestOptions.DEFAULT).getHits().getTotalHits().value
- );
+ {
+ Request searchRequest = new Request("POST", "/haproxysni1:test1/_search");
+ ObjectPath doc = ObjectPath.createFromResponse(cluster2Client().performRequest(searchRequest));
+ assertEquals(1, (int) doc.evaluate("hits.total.value"));
+ }
}
public void testHAProxyModeConnectionWithSNIToCluster2Works() throws IOException {
@@ -141,14 +150,15 @@ public void testHAProxyModeConnectionWithSNIToCluster2Works() throws IOException
.put("cluster.remote.haproxysni2.proxy_address", "haproxy:9600")
.put("cluster.remote.haproxysni2.server_name", "application2.example.com")
.build();
- updateClusterSettings(cluster1Client().getLowLevelClient(), settings);
+ updateClusterSettings(cluster1Client(), settings);
- assertTrue(isConnected(cluster1Client().getLowLevelClient()));
+ assertTrue(isConnected(cluster1Client()));
- assertEquals(
- 2L,
- cluster1Client().search(new SearchRequest("haproxysni2:test2"), RequestOptions.DEFAULT).getHits().getTotalHits().value
- );
+ {
+ Request searchRequest = new Request("POST", "/haproxysni2:test2/_search");
+ ObjectPath doc = ObjectPath.createFromResponse(cluster1Client().performRequest(searchRequest));
+ assertEquals(2, (int) doc.evaluate("hits.total.value"));
+ }
}
@SuppressWarnings("unchecked")
diff --git a/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml b/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml
index 0c814fd0f969..f8b1de515552 100644
--- a/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml
+++ b/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml
@@ -148,3 +148,41 @@ setup:
_internal.get_desired_balance: { }
- is_true: 'cluster_info'
+
+---
+"Test undesired_shard_allocation_count":
+
+ - skip:
+ version: " - 8.11.99"
+ reason: "undesired_shard_allocation_count added in 8.12.0"
+
+ - do:
+ indices.create:
+ index: test
+ body:
+ settings:
+ number_of_shards: 1
+ number_of_replicas: 0
+
+ - do:
+ cluster.health:
+ index: test
+ wait_for_status: green
+
+ - do:
+ cluster.state: {}
+ - set: { nodes._arbitrary_key_ : node_id }
+ - set: { nodes.$node_id.name : node_name }
+
+ - do:
+ _internal.get_desired_balance: { }
+
+ - gte: { 'cluster_balance_stats.shard_count' : 0 }
+ - gte: { 'cluster_balance_stats.undesired_shard_allocation_count' : 0 }
+ - gte: { 'cluster_balance_stats.nodes.$node_name.undesired_shard_allocation_count' : 0 }
+ - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count'
+ - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.total'
+ - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.min'
+ - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.max'
+ - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.average'
+ - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.std_dev'
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml
index 4f943abf1106..8e1d3431069c 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml
@@ -183,3 +183,41 @@ setup:
- do:
_internal.delete_desired_balance: { }
+
+---
+"Test undesired_shard_allocation_count":
+
+ - skip:
+ version: " - 8.11.99"
+ reason: "undesired_shard_allocation_count added in 8.12.0"
+
+ - do:
+ indices.create:
+ index: test
+ body:
+ settings:
+ number_of_shards: 1
+ number_of_replicas: 0
+
+ - do:
+ cluster.health:
+ index: test
+ wait_for_status: green
+
+ - do:
+ cluster.state: {}
+ - set: { nodes._arbitrary_key_ : node_id }
+ - set: { nodes.$node_id.name : node_name }
+
+ - do:
+ _internal.get_desired_balance: { }
+
+ - gte: { 'cluster_balance_stats.shard_count' : 0 }
+ - gte: { 'cluster_balance_stats.undesired_shard_allocation_count' : 0 }
+ - gte: { 'cluster_balance_stats.nodes.$node_name.undesired_shard_allocation_count' : 0 }
+ - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count'
+ - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.total'
+ - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.min'
+ - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.max'
+ - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.average'
+ - exists: 'cluster_balance_stats.tiers.data_content.undesired_shard_allocation_count.std_dev'
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/10_unified.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/10_unified.yml
index 1a03896f6d08..4607ae758b91 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/10_unified.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/10_unified.yml
@@ -93,3 +93,50 @@ teardown:
- match: {hits.hits.0.highlight.text.0: "The quickbrownfox is brown."}
- match: {hits.hits.0.highlight.text\.fvh.0: "The quickbrownfox is brown."}
- match: {hits.hits.0.highlight.text\.postings.0: "The quickbrownfox is brown."}
+---
+"Test hybrid search with knn where automatically disables weighted mode":
+ - skip:
+ version: ' - 8.11.99'
+ reason: 'kNN was not correctly skipped until 8.12'
+
+ - do:
+ indices.create:
+ index: test-highlighting-knn
+ body:
+ mappings:
+ "properties":
+ "vectors":
+ "type": "dense_vector"
+ "dims": 2
+ "index": true
+ "similarity": "l2_norm"
+ "text":
+ "type": "text"
+ "fields":
+ "fvh":
+ "type": "text"
+ "term_vector": "with_positions_offsets"
+ "postings":
+ "type": "text"
+ "index_options": "offsets"
+ - do:
+ index:
+ index: test-highlighting-knn
+ id: "1"
+ body:
+ "text" : "The quick brown fox is brown."
+ "vectors": [1, 2]
+ - do:
+ indices.refresh: {}
+
+ - do:
+ search:
+ index: test-highlighting-knn
+ body: {
+ "query": { "multi_match": { "query": "quick brown fox", "type": "phrase", "fields": [ "text*" ] } },
+ "highlight": { "type": "unified", "fields": { "*": { } } },
+ "knn": { "field": "vectors", "query_vector": [1, 2], "k": 10, "num_candidates": 10 } }
+
+ - match: { hits.hits.0.highlight.text.0: "The quickbrownfox is brown." }
+ - match: { hits.hits.0.highlight.text\.fvh.0: "The quickbrownfox is brown." }
+ - match: { hits.hits.0.highlight.text\.postings.0: "The quickbrownfox is brown." }
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/110_knn_query_with_filter.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/110_knn_query_with_filter.yml
new file mode 100644
index 000000000000..849df86a3056
--- /dev/null
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/110_knn_query_with_filter.yml
@@ -0,0 +1,274 @@
+# test how knn query interacts with filters
+setup:
+ - skip:
+ version: ' - 8.11.99'
+ reason: 'knn as query added in 8.12'
+
+ - do:
+ indices.create:
+ index: my_index
+ body:
+ settings:
+ number_of_shards: 1
+ mappings:
+ dynamic: false
+ properties:
+ my_vector:
+ type: dense_vector
+ dims: 4
+ index : true
+ similarity : l2_norm
+ my_name:
+ type: keyword
+ store: true
+ aliases:
+ my_alias:
+ filter:
+ term:
+ my_name: v2
+ my_alias1:
+ filter:
+ term:
+ my_name: v1
+
+ - do:
+ bulk:
+ refresh: true
+ index: my_index
+ body:
+ - '{"index": {"_id": "1"}}'
+ - '{"my_vector": [1, 1, 1, 1], "my_name": "v1"}'
+ - '{"index": {"_id": "2"}}'
+ - '{"my_vector": [1, 1, 1, 2], "my_name": "v2"}'
+ - '{"index": {"_id": "3"}}'
+ - '{"my_vector": [1, 1, 1, 3], "my_name": "v1"}'
+ - '{"index": {"_id": "4"}}'
+ - '{"my_vector": [1, 1, 1, 4], "my_name": "v2"}'
+ - '{"index": {"_id": "5"}}'
+ - '{"my_vector": [1, 1, 1, 5], "my_name": "v1"}'
+ - '{"index": {"_id": "6"}}'
+ - '{"my_vector": [1, 1, 1, 6], "my_name": "v2"}'
+ - '{"index": {"_id": "7"}}'
+ - '{"my_vector": [1, 1, 1, 7], "my_name": "v1"}'
+ - '{"index": {"_id": "8"}}'
+ - '{"my_vector": [1, 1, 1, 8], "my_name": "v2"}'
+ - '{"index": {"_id": "9"}}'
+ - '{"my_vector": [1, 1, 1, 9], "my_name": "v1"}'
+ - '{"index": {"_id": "10"}}'
+ - '{"my_vector": [1, 1, 1, 10], "my_name": "v2"}'
+
+---
+"Simple knn query":
+
+ - do:
+ search:
+ index: my_index
+ body:
+ size: 3
+ fields: [ my_name ]
+ query:
+ knn:
+ field: my_vector
+ query_vector: [1, 1, 1, 1]
+ num_candidates: 5
+
+ - match: { hits.total.value: 5 } # collector sees num_candidates docs
+ - length: {hits.hits: 3}
+ - match: { hits.hits.0._id: "1" }
+ - match: { hits.hits.0.fields.my_name.0: v1 }
+ - match: { hits.hits.1._id: "2" }
+ - match: { hits.hits.1.fields.my_name.0: v2 }
+ - match: { hits.hits.2._id: "3" }
+ - match: { hits.hits.2.fields.my_name.0: v1 }
+---
+"PRE_FILTER: knn query with alias filter as pre-filter":
+ - do:
+ search:
+ index: my_alias
+ body:
+ size: 3
+ fields: [ my_name ]
+ query:
+ knn:
+ field: my_vector
+ query_vector: [1, 1, 1, 1]
+ num_candidates: 5
+
+ - match: { hits.total.value: 5 } # collector sees num_candidates docs
+ - length: {hits.hits: 3}
+ - match: { hits.hits.0._id: "2" }
+ - match: { hits.hits.0.fields.my_name.0: v2 }
+ - match: { hits.hits.1._id: "4" }
+ - match: { hits.hits.1.fields.my_name.0: v2 }
+ - match: { hits.hits.2._id: "6" }
+ - match: { hits.hits.2.fields.my_name.0: v2 }
+
+ # alias prefilter is combined with internal filter
+ - do:
+ search:
+ index: my_alias
+ body:
+ size: 3
+ fields: [ my_name ]
+ query:
+ knn:
+ field: my_vector
+ query_vector: [ 1, 1, 1, 1 ]
+ num_candidates: 5
+ filter:
+ term:
+ my_name: v1
+
+ # both alias filter and internal filter are applied as pre-filter resulting in 0 hits for knn search
+ - match: { hits.total.value: 0 }
+ - length: { hits.hits: 0 }
+
+ # alias prefilter is applied when knn is a part of another query
+ - do:
+ search:
+ index: my_alias
+ body:
+ size: 3
+ fields: [ my_name ]
+ query:
+ bool:
+ should:
+ - wildcard:
+ my_name:
+ value: "v*"
+ - knn:
+ field: my_vector
+ query_vector: [1, 1, 1, 1]
+ num_candidates: 5
+
+ - match: { hits.total.value: 5 }
+ - length: { hits.hits: 3 }
+ - match: { hits.hits.0._id: "2" }
+ - match: { hits.hits.0.fields.my_name.0: v2 }
+ - match: { hits.hits.1._id: "4" }
+ - match: { hits.hits.1.fields.my_name.0: v2 }
+ - match: { hits.hits.2._id: "6" }
+ - match: { hits.hits.2.fields.my_name.0: v2 }
+
+---
+"PRE_FILTER: pre-filter across multiple internal filters":
+- do:
+ search:
+ index: my_index
+ body:
+ size: 3
+ fields: [ my_name ]
+ query:
+ knn:
+ field: my_vector
+ query_vector: [ 1, 1, 1, 1 ]
+ num_candidates: 5
+ filter:
+ - term:
+ my_name: v1
+ - term:
+ my_name: v2
+- match: { hits.total.value: 0 }
+- length: { hits.hits: 0 }
+
+---
+"PRE_FILTER: pre-filter across multiple aliases":
+ - do:
+ search:
+ index: my_alias,my_alias1
+ body:
+ size: 6
+ fields: [ my_name ]
+ query:
+ knn:
+ field: my_vector
+ query_vector: [1, 1, 1, 1]
+ num_candidates: 100
+
+ - match: { hits.total.value: 10 } # 5 docs from each alias
+ - length: {hits.hits: 6}
+ - match: { hits.hits.0._id: "1" }
+ - match: { hits.hits.0.fields.my_name.0: v1 }
+ - match: { hits.hits.1._id: "2" }
+ - match: { hits.hits.1.fields.my_name.0: v2 }
+ - match: { hits.hits.2._id: "3" }
+ - match: { hits.hits.2.fields.my_name.0: v1 }
+ - match: { hits.hits.3._id: "4" }
+ - match: { hits.hits.3.fields.my_name.0: v2 }
+ - match: { hits.hits.4._id: "5" }
+ - match: { hits.hits.4.fields.my_name.0: v1 }
+ - match: { hits.hits.5._id: "6" }
+ - match: { hits.hits.5.fields.my_name.0: v2 }
+
+---
+"PRE_FILTER: knn query with internal filter as pre-filter":
+ - do:
+ search:
+ index: my_index
+ body:
+ size: 3
+ fields: [ my_name ]
+ query:
+ knn:
+ field: my_vector
+ query_vector: [1, 1, 1, 1]
+ num_candidates: 5
+ filter:
+ term:
+ my_name: v2
+
+ - match: { hits.total.value: 5 }
+ - length: {hits.hits: 3}
+ - match: { hits.hits.0._id: "2" }
+ - match: { hits.hits.0.fields.my_name.0: v2 }
+ - match: { hits.hits.1._id: "4" }
+ - match: { hits.hits.1.fields.my_name.0: v2 }
+ - match: { hits.hits.2._id: "6" }
+ - match: { hits.hits.2.fields.my_name.0: v2 }
+
+---
+"POST_FILTER: knn query with filter from a parent bool query as post-filter":
+ - do:
+ search:
+ index: my_index
+ body:
+ size: 3
+ fields: [ my_name ]
+ query:
+ bool:
+ must:
+ - term:
+ my_name: v2
+ - knn:
+ field: my_vector
+ query_vector: [1, 1, 1, 1]
+ num_candidates: 5
+
+ - match: { hits.total.value: 2 }
+ - length: {hits.hits: 2} # knn query returns top 5 docs, but they are post-filtered to 2 docs
+ - match: { hits.hits.0._id: "2" }
+ - match: { hits.hits.0.fields.my_name.0: v2 }
+ - match: { hits.hits.1._id: "4" }
+ - match: { hits.hits.1.fields.my_name.0: v2 }
+
+ - do:
+ search:
+ index: my_index
+ body:
+ size: 3
+ fields: [ my_name ]
+ query:
+ bool:
+ must:
+ - term:
+ my_name: v2
+ - knn:
+ field: my_vector
+ query_vector: [ 1, 1, 1, 1 ]
+ num_candidates: 5
+ filter:
+ term:
+ my_name: v1
+
+ - match: { hits.total.value: 0}
+ - length: { hits.hits: 0 } # knn query returns top 5 docs, but they are post-filtered to 0 docs
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/120_knn_query_multiple_shards.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/120_knn_query_multiple_shards.yml
new file mode 100644
index 000000000000..b1c0fd948481
--- /dev/null
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/120_knn_query_multiple_shards.yml
@@ -0,0 +1,216 @@
+# test how knn query interacts with multiple shards
+setup:
+ - skip:
+ version: ' - 8.11.99'
+ reason: 'knn as query added in 8.12'
+ features: close_to
+
+ - do:
+ indices.create:
+ index: my_index
+ body:
+ settings:
+ number_of_shards: 2
+ mappings:
+ dynamic: false
+ properties:
+ my_vector:
+ type: dense_vector
+ dims: 4
+ index : true
+ similarity : l2_norm
+ my_name:
+ type: keyword
+ store: true
+
+ - do:
+ bulk:
+ refresh: true
+ index: my_index
+ body:
+ - '{"index": {"_id": "1"}}'
+ - '{"my_vector": [1, 1, 1, 1], "my_name": "v1"}'
+ - '{"index": {"_id": "2"}}'
+ - '{"my_vector": [1, 1, 1, 2], "my_name": "v2"}'
+ - '{"index": {"_id": "3"}}'
+ - '{"my_vector": [1, 1, 1, 3], "my_name": "v1"}'
+ - '{"index": {"_id": "4"}}'
+ - '{"my_vector": [1, 1, 1, 4], "my_name": "v2"}'
+ - '{"index": {"_id": "5"}}'
+ - '{"my_vector": [1, 1, 1, 5], "my_name": "v1"}'
+ - '{"index": {"_id": "6"}}'
+ - '{"my_vector": [1, 1, 1, 6], "my_name": "v2"}'
+ - '{"index": {"_id": "7"}}'
+ - '{"my_vector": [1, 1, 1, 7], "my_name": "v1"}'
+ - '{"index": {"_id": "8"}}'
+ - '{"my_vector": [1, 1, 1, 8], "my_name": "v2"}'
+ - '{"index": {"_id": "9"}}'
+ - '{"my_vector": [1, 1, 1, 9], "my_name": "v1"}'
+ - '{"index": {"_id": "10"}}'
+ - '{"my_vector": [1, 1, 1, 10], "my_name": "v2"}'
+ - '{"index": {"_id": "11"}}'
+ - '{"my_vector": [1, 1, 1, 11], "my_name": "v1"}'
+ - '{"index": {"_id": "12"}}'
+ - '{"my_vector": [1, 1, 1, 12], "my_name": "v2"}'
+
+
+---
+"Search for 2 knn queries combines scores from them":
+ - do:
+ search:
+ index: my_index
+ body:
+ size: 6
+ query:
+ bool:
+ should:
+ - knn:
+ field: my_vector
+ query_vector: [ 1, 1, 1, 1 ]
+ num_candidates: 100
+ boost: 1.1
+ - knn:
+ field: my_vector
+ query_vector: [ 1, 1, 1, 12 ]
+ num_candidates: 100
+
+ - length: {hits.hits: 6}
+ - match: {hits.total.value: 12}
+ - match: {hits.hits.0._id: '1'}
+ - match: {hits.hits.1._id: '12'}
+ - match: {hits.hits.2._id: '2'}
+ - match: { hits.hits.3._id: '11' }
+ - match: { hits.hits.4._id: '3' }
+ - match: { hits.hits.5._id: '10' }
+
+
+---
+"Hybrid search combines scores from knn and other queries":
+ - do:
+ search:
+ include_named_queries_score: true
+ index: my_index
+ body:
+ size: 3
+ query:
+ bool:
+ should:
+ - wildcard:
+ my_name:
+ value: "v*" # produces scores 1.0
+ _name: "bm25_query"
+ - knn:
+ field: my_vector
+ query_vector: [ 1, 1, 1, 1 ]
+ num_candidates: 3
+ _name: "knn_query"
+
+ - length: {hits.hits: 3}
+ - match: {hits.total.value: 12}
+ - match: {hits.hits.0._id: '1'}
+ - match: {hits.hits.1._id: '2'}
+ - match: {hits.hits.2._id: '3'}
+
+ - close_to: {hits.hits.0._score: { value: 2.0, error: 0.00001 } }
+ - close_to: {hits.hits.0.matched_queries.bm25_query: { value: 1.0, error: 0.00001 } }
+ - close_to: {hits.hits.0.matched_queries.knn_query: { value: 1.0, error: 0.00001 } }
+
+ - close_to: {hits.hits.1._score: { value: 1.5, error: 0.00001 } }
+ - close_to: { hits.hits.1.matched_queries.bm25_query: { value: 1.0, error: 0.00001 } }
+ - close_to: { hits.hits.1.matched_queries.knn_query: { value: 0.5, error: 0.00001 } }
+
+ - close_to: {hits.hits.2._score: { value: 1.2, error: 0.00001 } }
+ - close_to: { hits.hits.2.matched_queries.bm25_query: { value: 1.0, error: 0.00001 } }
+ - close_to: { hits.hits.2.matched_queries.knn_query: { value: 0.2, error: 0.00001 } }
+
+ # the same query with boosts
+ - do:
+ search:
+ include_named_queries_score: true
+ index: my_index
+ body:
+ size: 3
+ query:
+ bool:
+ should:
+ - wildcard:
+ my_name:
+ value: "v*" # produces scores 1.0
+ boost: 100
+ _name: "bm25_query"
+ - knn:
+ field: my_vector
+ query_vector: [ 1, 1, 1, 1 ]
+ num_candidates: 3
+ boost: 100
+ _name: "knn_query"
+
+ - length: { hits.hits: 3 }
+ - match: { hits.total.value: 12 }
+ - match: { hits.hits.0._id: '1' }
+ - match: { hits.hits.1._id: '2' }
+ - match: { hits.hits.2._id: '3' }
+
+ - close_to: { hits.hits.0._score: { value: 200.0, error: 0.00001 } }
+ - close_to: { hits.hits.0.matched_queries.bm25_query: { value: 100.0, error: 0.00001 } }
+ - close_to: { hits.hits.0.matched_queries.knn_query: { value: 100.0, error: 0.00001 } }
+
+ - close_to: { hits.hits.1._score: { value: 150.0, error: 0.00001 } }
+ - close_to: { hits.hits.1.matched_queries.bm25_query: { value: 100.0, error: 0.00001 } }
+ - close_to: { hits.hits.1.matched_queries.knn_query: { value: 50.0, error: 0.00001 } }
+
+ - close_to: { hits.hits.2._score: { value: 120, error: 0.00001 } }
+ - close_to: { hits.hits.2.matched_queries.bm25_query: { value: 100.0, error: 0.00001 } }
+ - close_to: { hits.hits.2.matched_queries.knn_query: { value: 20.0, error: 0.00001 } }
+
+---
+"Aggregations with collected number of docs depends on num_candidates":
+ - do:
+ search:
+ index: my_index
+ body:
+ size: 2
+ query:
+ knn:
+ field: my_vector
+ query_vector: [1, 1, 1, 1]
+ num_candidates: 100 # collect up to 100 candidates from each shard
+ aggs:
+ my_agg:
+ terms:
+ field: my_name
+ order:
+ _key: asc
+
+ - length: {hits.hits: 2}
+ - match: {hits.total.value: 12}
+ - match: {aggregations.my_agg.buckets.0.key: 'v1'}
+ - match: {aggregations.my_agg.buckets.1.key: 'v2'}
+ - match: {aggregations.my_agg.buckets.0.doc_count: 6}
+ - match: {aggregations.my_agg.buckets.1.doc_count: 6}
+
+ - do:
+ search:
+ index: my_index
+ body:
+ size: 2
+ query:
+ knn:
+ field: my_vector
+ query_vector: [ 1, 1, 1, 1 ]
+ num_candidates: 3 # collect 3 candidates from each shard
+ aggs:
+ my_agg2:
+ terms:
+ field: my_name
+ order:
+ _key: asc
+ my_sum_buckets:
+ sum_bucket:
+ buckets_path: "my_agg2>_count"
+
+ - length: { hits.hits: 2 }
+ - match: { hits.total.value: 6 }
+ - match: { aggregations.my_agg2.buckets.0.key: 'v1' }
+ - match: { aggregations.my_agg2.buckets.1.key: 'v2' }
+ - match: { aggregations.my_sum_buckets.value: 6.0 }
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/130_knn_query_nested_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/130_knn_query_nested_search.yml
new file mode 100644
index 000000000000..435291b454d0
--- /dev/null
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/130_knn_query_nested_search.yml
@@ -0,0 +1,213 @@
+setup:
+ - skip:
+ version: ' - 8.11.99'
+ reason: 'knn as query added in 8.12'
+ - do:
+ indices.create:
+ index: test
+ body:
+ settings:
+ index:
+ number_of_shards: 1
+ mappings:
+ properties:
+ name:
+ type: keyword
+ nested:
+ type: nested
+ properties:
+ paragraph_id:
+ type: keyword
+ vector:
+ type: dense_vector
+ dims: 5
+ index: true
+ similarity: l2_norm
+ aliases:
+ my_alias:
+ filter:
+ term:
+ name: "rabbit.jpg"
+
+ - do:
+ index:
+ index: test
+ id: "1"
+ body:
+ name: cow.jpg
+ nested:
+ - paragraph_id: 0
+ vector: [230.0, 300.33, -34.8988, 15.555, -200.0]
+ - paragraph_id: 1
+ vector: [240.0, 300, -3, 1, -20]
+
+ - do:
+ index:
+ index: test
+ id: "2"
+ body:
+ name: moose.jpg
+ nested:
+ - paragraph_id: 0
+ vector: [-0.5, 100.0, -13, 14.8, -156.0]
+ - paragraph_id: 2
+ vector: [0, 100.0, 0, 14.8, -156.0]
+ - paragraph_id: 3
+ vector: [0, 1.0, 0, 1.8, -15.0]
+
+ - do:
+ index:
+ index: test
+ id: "3"
+ body:
+ name: rabbit.jpg
+ nested:
+ - paragraph_id: 0
+ vector: [0.5, 111.3, -13.0, 14.8, -156.0]
+
+ - do:
+ indices.refresh: {}
+
+---
+"nested kNN search that returns diverse parents docs":
+ - do:
+ search:
+ index: test
+ body:
+ fields: [ "name" ]
+ query:
+ nested:
+ path: nested
+ query:
+ knn:
+ field: nested.vector
+ query_vector: [-0.5, 90.0, -10, 14.8, -156.0]
+ num_candidates: 3
+
+ - match: {hits.hits.0._id: "2"}
+ - match: {hits.hits.0.fields.name.0: "moose.jpg"}
+
+ - match: {hits.hits.1._id: "3"}
+ - match: {hits.hits.1.fields.name.0: "rabbit.jpg"}
+
+ - do:
+ search:
+ index: test
+ body:
+ fields: [ "name" ]
+ query:
+ nested:
+ path: nested
+ query:
+ knn:
+ field: nested.vector
+ query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ]
+ num_candidates: 3
+ inner_hits: { size: 1, "fields": [ "nested.paragraph_id" ], _source: false }
+
+ - match: {hits.total.value: 3}
+
+ - match: { hits.hits.0._id: "2" }
+ - match: { hits.hits.0.fields.name.0: "moose.jpg" }
+ - match: { hits.hits.0.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" }
+
+ - match: { hits.hits.1._id: "3" }
+ - match: { hits.hits.1.fields.name.0: "rabbit.jpg" }
+ - match: { hits.hits.1.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" }
+
+ - match: { hits.hits.2._id: "1" }
+ - match: { hits.hits.2.fields.name.0: "cow.jpg" }
+ - match: { hits.hits.2.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" }
+
+---
+"nested kNN search pre-filtered on alias with filter on top level fields":
+ - do:
+ search:
+ index: my_alias # filter on name: "rabbit.jpg"
+ body:
+ fields: [ "name" ]
+ query:
+ nested:
+ path: nested
+ query:
+ knn:
+ field: nested.vector
+ query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ]
+ num_candidates: 1
+ inner_hits: { size: 1, "fields": [ "nested.paragraph_id" ], _source: false }
+
+ - match: {hits.total.value: 1} # as alias is passed as pre-filter, we get a single result
+ - match: {hits.hits.0._id: "3"}
+ - match: {hits.hits.0.fields.name.0: "rabbit.jpg"}
+ - match: { hits.hits.0.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" }
+
+---
+"nested kNN search post-filtered on top level fields":
+ - do:
+ search:
+ index: test
+ body:
+ fields: [ "name" ]
+ query:
+ bool:
+ must:
+ - term:
+ name: "rabbit.jpg"
+ - nested:
+ path: nested
+ query:
+ knn:
+ field: nested.vector
+ query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ]
+ num_candidates: 1
+ - match: { hits.total.value: 0 } # no hits because the returned single vector did not pass the post-filter
+
+ - do:
+ search:
+ index: test
+ body:
+ fields: [ "name" ]
+ query:
+ bool:
+ must:
+ - term:
+ name: "rabbit.jpg"
+ - nested:
+ path: nested
+ query:
+ knn:
+ field: nested.vector
+ query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ]
+ num_candidates: 3
+ inner_hits: { size: 1, fields: [ "nested.paragraph_id" ], _source: false }
+
+ - match: {hits.total.value: 1}
+ - match: {hits.hits.0._id: "3"}
+ - match: {hits.hits.0.fields.name.0: "rabbit.jpg"}
+ - match: { hits.hits.0.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" }
+---
+
+"nested kNN search post-filtered on nested fields DOES NOT work":
+ - do:
+ search:
+ index: test
+ body:
+ fields: [ "name" ]
+ query:
+ nested:
+ path: nested
+ query:
+ bool:
+ must:
+ - term:
+ nested.paragraph_id: 3
+ - knn:
+ field: nested.vector
+ query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ]
+ num_candidates: 6
+ inner_hits: { size: 1, "fields": [ "nested.paragraph_id" ], _source: false }
+ # no hits because, regardless of num_candidates, knn returns top 3 child vectors from distinct parents
+ # and they don't pass the post-filter
+ # TODO: fix it on Lucene level so nested knn respects num_candidates
+ # or do pre-filtering
+ - match: {hits.total.value: 0}
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/140_knn_query_with_other_queries.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/140_knn_query_with_other_queries.yml
new file mode 100644
index 000000000000..8f52a72cce01
--- /dev/null
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/140_knn_query_with_other_queries.yml
@@ -0,0 +1,127 @@
+# test how knn query interacts with other queries
+setup:
+ - skip:
+ version: ' - 8.11.99'
+ reason: 'knn as query added in 8.12'
+ features: close_to
+
+ - do:
+ indices.create:
+ index: my_index
+ body:
+ settings:
+ number_of_shards: 1
+ mappings:
+ dynamic: false
+ properties:
+ my_vector:
+ type: dense_vector
+ dims: 4
+ index : true
+ similarity : l2_norm
+ my_name:
+ type: keyword
+ store: true
+ aliases:
+ my_alias:
+ filter:
+ term:
+ my_name: v2
+ my_alias1:
+ filter:
+ term:
+ my_name: v1
+
+ - do:
+ bulk:
+ refresh: true
+ index: my_index
+ body:
+ - '{"index": {"_id": "1"}}'
+ - '{"my_vector": [1, 1, 1, 1], "my_name": "v1"}'
+ - '{"index": {"_id": "2"}}'
+ - '{"my_vector": [1, 1, 1, 2], "my_name": "v2"}'
+ - '{"index": {"_id": "3"}}'
+ - '{"my_vector": [1, 1, 1, 3], "my_name": "v1"}'
+ - '{"index": {"_id": "4"}}'
+ - '{"my_vector": [1, 1, 1, 4], "my_name": "v2"}'
+ - '{"index": {"_id": "5"}}'
+ - '{"my_vector": [1, 1, 1, 5], "my_name": "v1"}'
+ - '{"index": {"_id": "6"}}'
+ - '{"my_vector": [1, 1, 1, 6], "my_name": "v2"}'
+ - '{"index": {"_id": "7"}}'
+ - '{"my_vector": [1, 1, 1, 7], "my_name": "v1"}'
+ - '{"index": {"_id": "8"}}'
+ - '{"my_vector": [1, 1, 1, 8], "my_name": "v2"}'
+ - '{"index": {"_id": "9"}}'
+ - '{"my_vector": [1, 1, 1, 9], "my_name": "v1"}'
+ - '{"index": {"_id": "10"}}'
+ - '{"my_vector": [1, 1, 1, 10], "my_name": "v2"}'
+
+---
+"Function score query with knn query":
+ # find top 5 knn docs, then boost docs with name v1 by 10 and docs with name v2 by 100
+ - do:
+ search:
+ index: my_index
+ body:
+ size: 3
+ fields: [ my_name ]
+ query:
+ function_score:
+ query:
+ knn:
+ field: my_vector
+ query_vector: [ 1, 1, 1, 1 ]
+ num_candidates: 5
+ functions:
+ - filter: { match: { my_name: v1 } }
+ weight: 10
+ - filter: { match: { my_name: v2 } }
+ weight: 100
+ boost_mode: multiply
+
+ - match: { hits.total.value: 5 } # collector sees num_candidates docs
+ - length: { hits.hits: 3 }
+ - match: { hits.hits.0._id: "2" }
+ - match: { hits.hits.0.fields.my_name.0: v2 }
+ - close_to: { hits.hits.0._score: { value: 50.0, error: 0.001 } }
+ - match: { hits.hits.1._id: "1" }
+ - match: { hits.hits.1.fields.my_name.0: v1 }
+ - close_to: { hits.hits.1._score: { value: 10.0, error: 0.001 } }
+ - match: { hits.hits.2._id: "4" }
+ - match: { hits.hits.2.fields.my_name.0: v2 }
+ - close_to: { hits.hits.2._score: { value: 10.0, error: 0.001 } }
+
+---
+"dis_max query with knn query":
+ - do:
+ search:
+ index: my_index
+ body:
+ size: 10
+ fields: [ my_name ]
+ query:
+ dis_max:
+ queries:
+ - knn: { field: my_vector, query_vector: [ 1, 1, 1, 1 ], num_candidates: 5 }
+ - match: { my_name: v2 }
+ tie_breaker: 0.8
+
+ - match: { hits.total.value: 8 } # 5 knn results + extra results from match query
+ - match: { hits.hits.0._id: "2" }
+ - match: { hits.hits.0.fields.my_name.0: v2 }
+ - match: { hits.hits.1._id: "1" }
+ - match: { hits.hits.1.fields.my_name.0: v1 }
+ - match: { hits.hits.2._id: "4" }
+ - match: { hits.hits.2.fields.my_name.0: v2 }
+ - match: { hits.hits.3._id: "6" }
+ - match: { hits.hits.3.fields.my_name.0: v2 }
+ - match: { hits.hits.4._id: "8" }
+ - match: { hits.hits.4.fields.my_name.0: v2 }
+ - match: { hits.hits.5._id: "10" }
+ - match: { hits.hits.5.fields.my_name.0: v2 }
+ - match: { hits.hits.6._id: "3" }
+ - match: { hits.hits.6.fields.my_name.0: v1 }
+ - match: { hits.hits.7._id: "5" }
+ - match: { hits.hits.7.fields.my_name.0: v1 }
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml
index 340cd8f8d0f7..57f8603f1e06 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml
@@ -294,23 +294,6 @@ setup:
- match: { error.root_cause.0.reason: "failed to create query: field [nonexistent] does not exist in the mapping" }
---
-"Direct kNN queries are disallowed":
- - skip:
- version: ' - 8.3.99'
- reason: 'error message changed in 8.4'
- - do:
- catch: bad_request
- search:
- index: test-index
- body:
- query:
- knn:
- field: vector
- query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ]
- num_candidates: 1
- - match: { error.root_cause.0.type: "illegal_argument_exception" }
- - match: { error.root_cause.0.reason: "[knn] queries cannot be provided directly, use the [knn] body parameter instead" }
----
"KNN Vector similarity search only":
- skip:
version: ' - 8.7.99'
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml
index 873b6d87cac6..ea21bb69a77b 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml
@@ -163,20 +163,6 @@ setup:
- match: { error.root_cause.0.reason: "failed to create query: field [nonexistent] does not exist in the mapping" }
---
-"Direct kNN queries are disallowed":
- - do:
- catch: bad_request
- search:
- index: test
- body:
- query:
- knn:
- field: vector
- query_vector: [ -1, 0, 1, 2, 3 ]
- num_candidates: 1
- - match: { error.root_cause.0.type: "illegal_argument_exception" }
- - match: { error.root_cause.0.reason: "[knn] queries cannot be provided directly, use the [knn] body parameter instead" }
----
"Vector similarity search only":
- skip:
version: ' - 8.7.99'
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java
index 3a2c6b5ebd0f..80bba57270aa 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java
@@ -193,7 +193,7 @@ public void testDeleteCreateInOneBulk() throws Exception {
refresh();
disruption.startDisrupting();
logger.info("--> delete index");
- executeAndCancelCommittedPublication(indicesAdmin().prepareDelete("test").setTimeout("0s")).get(10, TimeUnit.SECONDS);
+ executeAndCancelCommittedPublication(indicesAdmin().prepareDelete("test").setTimeout("0s")).get(30, TimeUnit.SECONDS);
logger.info("--> and recreate it");
executeAndCancelCommittedPublication(
prepareCreate("test").setSettings(
@@ -201,7 +201,7 @@ public void testDeleteCreateInOneBulk() throws Exception {
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetadata.SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey(), "0")
).setTimeout("0s")
- ).get(10, TimeUnit.SECONDS);
+ ).get(30, TimeUnit.SECONDS);
logger.info("--> letting cluster proceed");
@@ -295,7 +295,7 @@ public void testDelayedMappingPropagationOnPrimary() throws Exception {
// Now make sure the indexing request finishes successfully
disruption.stopDisrupting();
- assertTrue(putMappingResponse.get(10, TimeUnit.SECONDS).isAcknowledged());
+ assertTrue(putMappingResponse.get(30, TimeUnit.SECONDS).isAcknowledged());
assertThat(docIndexResponse.get(10, TimeUnit.SECONDS), instanceOf(IndexResponse.class));
assertEquals(1, docIndexResponse.get(10, TimeUnit.SECONDS).getShardInfo().getTotal());
}
@@ -408,11 +408,11 @@ public void testDelayedMappingPropagationOnReplica() throws Exception {
// Now make sure the indexing request finishes successfully
disruption.stopDisrupting();
- assertTrue(putMappingResponse.get(10, TimeUnit.SECONDS).isAcknowledged());
+ assertTrue(putMappingResponse.get(30, TimeUnit.SECONDS).isAcknowledged());
assertThat(docIndexResponse.get(10, TimeUnit.SECONDS), instanceOf(IndexResponse.class));
assertEquals(2, docIndexResponse.get(10, TimeUnit.SECONDS).getShardInfo().getTotal()); // both shards should have succeeded
- assertThat(dynamicMappingsFut.get(10, TimeUnit.SECONDS).getResult(), equalTo(CREATED));
+ assertThat(dynamicMappingsFut.get(30, TimeUnit.SECONDS).getResult(), equalTo(CREATED));
}
}
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedSnapshotIT.java
index e3bd85440c53..bd14f913b10e 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedSnapshotIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedSnapshotIT.java
@@ -78,7 +78,7 @@ public void run() {
final var shardStatuses = snapshotShardsService.currentSnapshotShards(snapshot);
assertEquals(1, shardStatuses.size());
- final var shardStatus = shardStatuses.get(new ShardId(index, 0)).asCopy();
+ final var shardStatus = shardStatuses.get(new ShardId(index, 0));
logger.info("--> {}", shardStatus);
if (i == 0) {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java
index d68301a31072..ca522064e3d0 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java
@@ -1066,7 +1066,6 @@ public void testEquivalentDeletesAreDeduplicated() throws Exception {
}
}
- @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99355")
public void testMasterFailoverOnFinalizationLoop() throws Exception {
internalCluster().startMasterOnlyNodes(3);
final String dataNode = internalCluster().startDataOnlyNode();
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java
index ee955da01f4a..b2494c5bd2b9 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java
@@ -71,7 +71,7 @@ public void testRetryPostingSnapshotStatusMessages() throws Exception {
List stages = snapshotShardsService.currentSnapshotShards(snapshot)
.values()
.stream()
- .map(status -> status.asCopy().getStage())
+ .map(IndexShardSnapshotStatus.Copy::getStage)
.toList();
assertThat(stages, hasSize(shards));
assertThat(stages, everyItem(equalTo(IndexShardSnapshotStatus.Stage.DONE)));
diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java
index 8e636a93e4f0..65792ebcccc6 100644
--- a/server/src/main/java/module-info.java
+++ b/server/src/main/java/module-info.java
@@ -406,7 +406,8 @@
provides org.elasticsearch.features.FeatureSpecification
with
org.elasticsearch.features.FeaturesSupportedSpecification,
- org.elasticsearch.health.HealthFeature;
+ org.elasticsearch.health.HealthFeature,
+ org.elasticsearch.rest.RestFeatures;
uses org.elasticsearch.plugins.internal.SettingsExtension;
uses RestExtension;
diff --git a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java
index 6c0836c27744..d625da5df9cc 100644
--- a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java
+++ b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java
@@ -257,7 +257,7 @@ public static void maybeDieOnAnotherThread(final Throwable throwable) {
final String formatted = ExceptionsHelper.formatStackTrace(Thread.currentThread().getStackTrace());
logger.error("fatal error {}: {}\n{}", error.getClass().getCanonicalName(), error.getMessage(), formatted);
} finally {
- new Thread(() -> { throw error; }).start();
+ new Thread(() -> { throw error; }, "elasticsearch-error-rethrower").start();
}
});
}
diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java
index d09be93772e7..083d7de37194 100644
--- a/server/src/main/java/org/elasticsearch/TransportVersions.java
+++ b/server/src/main/java/org/elasticsearch/TransportVersions.java
@@ -157,6 +157,9 @@ static TransportVersion def(int id) {
public static final TransportVersion CLUSTER_FEATURES_ADDED = def(8_526_00_0);
public static final TransportVersion DSL_ERROR_STORE_INFORMATION_ENHANCED = def(8_527_00_0);
public static final TransportVersion INVALID_BUCKET_PATH_EXCEPTION_INTRODUCED = def(8_528_00_0);
+ public static final TransportVersion KNN_AS_QUERY_ADDED = def(8_529_00_0);
+ public static final TransportVersion UNDESIRED_SHARD_ALLOCATIONS_COUNT_ADDED = def(8_530_00_0);
+ public static final TransportVersion ML_INFERENCE_TASK_SETTINGS_OPTIONAL_ADDED = def(8_531_00_0);
/*
* STOP! READ THIS FIRST! No, really,
diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java
index 6ac451d5bc93..a855b6b8ee7e 100644
--- a/server/src/main/java/org/elasticsearch/action/ActionModule.java
+++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java
@@ -282,6 +282,7 @@
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards;
import org.elasticsearch.health.GetHealthAction;
import org.elasticsearch.health.RestGetHealthAction;
@@ -857,7 +858,7 @@ private static ActionFilters setupActionFilters(List actionPlugins
return new ActionFilters(Set.copyOf(finalFilters));
}
- public void initRestHandlers(Supplier nodesInCluster) {
+ public void initRestHandlers(Supplier nodesInCluster, Predicate clusterSupportsFeature) {
List catActions = new ArrayList<>();
Predicate catActionsFilter = restExtension.getCatActionsFilter();
Predicate restFilter = restExtension.getActionsFilter();
@@ -889,7 +890,7 @@ public void initRestHandlers(Supplier nodesInCluster) {
registerHandler.accept(new RestClusterStateAction(settingsFilter, threadPool));
registerHandler.accept(new RestClusterHealthAction());
registerHandler.accept(new RestClusterUpdateSettingsAction());
- registerHandler.accept(new RestClusterGetSettingsAction(settings, clusterSettings, settingsFilter, nodesInCluster));
+ registerHandler.accept(new RestClusterGetSettingsAction(settings, clusterSettings, settingsFilter, clusterSupportsFeature));
registerHandler.accept(new RestClusterRerouteAction(settingsFilter));
registerHandler.accept(new RestClusterSearchShardsAction());
registerHandler.accept(new RestPendingClusterTasksAction());
diff --git a/server/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java b/server/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java
index bb4eb6c202b7..e018cf48fcef 100644
--- a/server/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java
+++ b/server/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java
@@ -16,7 +16,7 @@
import java.io.IOException;
import java.io.PrintWriter;
-public class NoShardAvailableActionException extends ElasticsearchException {
+public final class NoShardAvailableActionException extends ElasticsearchException {
private static final StackTraceElement[] EMPTY_STACK_TRACE = new StackTraceElement[0];
@@ -28,22 +28,18 @@ public static NoShardAvailableActionException forOnShardFailureWrapper(String ms
return new NoShardAvailableActionException(null, msg, null, true);
}
- @SuppressWarnings("this-escape")
public NoShardAvailableActionException(ShardId shardId) {
this(shardId, null, null, false);
}
- @SuppressWarnings("this-escape")
public NoShardAvailableActionException(ShardId shardId, String msg) {
this(shardId, msg, null, false);
}
- @SuppressWarnings("this-escape")
public NoShardAvailableActionException(ShardId shardId, String msg, Throwable cause) {
this(shardId, msg, cause, false);
}
- @SuppressWarnings("this-escape")
private NoShardAvailableActionException(ShardId shardId, String msg, Throwable cause, boolean onShardFailureWrapper) {
super(msg, cause);
setShard(shardId);
diff --git a/server/src/main/java/org/elasticsearch/action/RoutingMissingException.java b/server/src/main/java/org/elasticsearch/action/RoutingMissingException.java
index a90bc14f9ac8..0999e7154b05 100644
--- a/server/src/main/java/org/elasticsearch/action/RoutingMissingException.java
+++ b/server/src/main/java/org/elasticsearch/action/RoutingMissingException.java
@@ -18,11 +18,10 @@
import java.io.IOException;
import java.util.Objects;
-public class RoutingMissingException extends ElasticsearchException {
+public final class RoutingMissingException extends ElasticsearchException {
private final String id;
- @SuppressWarnings("this-escape")
public RoutingMissingException(String index, String id) {
super("routing is required for [" + index + "]/[" + id + "]");
Objects.requireNonNull(index, "index must not be null");
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceAction.java
index b585e891a590..fc1179007952 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceAction.java
@@ -95,7 +95,7 @@ protected void masterOperation(
listener.onResponse(
new DesiredBalanceResponse(
desiredBalanceShardsAllocator.getStats(),
- ClusterBalanceStats.createFrom(state, clusterInfo, writeLoadForecaster),
+ ClusterBalanceStats.createFrom(state, latestDesiredBalance, clusterInfo, writeLoadForecaster),
createRoutingTable(state, latestDesiredBalance),
clusterInfo
)
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java
index 0cf0baa75a8d..ebf01feaaa89 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java
@@ -20,7 +20,7 @@
/**
* A request to get node (cluster) level information.
*/
-public class NodesInfoRequest extends BaseNodesRequest {
+public final class NodesInfoRequest extends BaseNodesRequest {
private final NodesInfoMetrics nodesInfoMetrics;
@@ -39,7 +39,6 @@ public NodesInfoRequest(StreamInput in) throws IOException {
* Get information from nodes based on the nodes ids specified. If none are passed, information
* for all nodes will be returned.
*/
- @SuppressWarnings("this-escape")
public NodesInfoRequest(String... nodesIds) {
super(nodesIds);
nodesInfoMetrics = new NodesInfoMetrics();
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java
index 39205715dca8..6f6253491c58 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java
@@ -20,7 +20,9 @@
import java.io.IOException;
import java.util.Objects;
-public class ClusterSearchShardsRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable {
+public final class ClusterSearchShardsRequest extends MasterNodeReadRequest
+ implements
+ IndicesRequest.Replaceable {
private String[] indices = Strings.EMPTY_ARRAY;
@Nullable
@@ -31,7 +33,6 @@ public class ClusterSearchShardsRequest extends MasterNodeReadRequest shardsStatus = snapshotShardsService.currentSnapshotShards(snapshot);
+ final var shardsStatus = snapshotShardsService.currentSnapshotShards(snapshot);
if (shardsStatus == null) {
continue;
}
Map shardMapBuilder = new HashMap<>();
- for (Map.Entry shardEntry : shardsStatus.entrySet()) {
+ for (final var shardEntry : shardsStatus.entrySet()) {
final ShardId shardId = shardEntry.getKey();
- final IndexShardSnapshotStatus.Copy lastSnapshotStatus = shardEntry.getValue().asCopy();
+ final IndexShardSnapshotStatus.Copy lastSnapshotStatus = shardEntry.getValue();
final IndexShardSnapshotStatus.Stage stage = lastSnapshotStatus.getStage();
String shardNodeId = null;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java
index 2a6f0325be1d..f8b9a9571ddd 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java
@@ -243,7 +243,6 @@ private void buildResponse(
entry.indices().get(shardId.getIndexName()),
shardId
)
- .asCopy()
);
} else {
shardStatus = new SnapshotIndexShardStatus(entry.shardId(shardEntry.getKey()), stage);
@@ -322,7 +321,7 @@ private void loadRepositoryData(
repositoriesService.repository(repositoryName)
.getSnapshotInfo(new GetSnapshotInfoContext(snapshotIdsToLoad, true, task::isCancelled, (context, snapshotInfo) -> {
List shardStatusBuilder = new ArrayList<>();
- final Map shardStatuses;
+ final Map shardStatuses;
try {
shardStatuses = snapshotShards(repositoryName, repositoryData, task, snapshotInfo);
} catch (Exception e) {
@@ -330,8 +329,8 @@ private void loadRepositoryData(
context.onFailure(e);
return;
}
- for (Map.Entry shardStatus : shardStatuses.entrySet()) {
- IndexShardSnapshotStatus.Copy lastSnapshotStatus = shardStatus.getValue().asCopy();
+ for (final var shardStatus : shardStatuses.entrySet()) {
+ IndexShardSnapshotStatus.Copy lastSnapshotStatus = shardStatus.getValue();
shardStatusBuilder.add(new SnapshotIndexShardStatus(shardStatus.getKey(), lastSnapshotStatus));
}
final SnapshotsInProgress.State state = switch (snapshotInfo.state()) {
@@ -374,14 +373,14 @@ private void loadRepositoryData(
* @param snapshotInfo snapshot info
* @return map of shard id to snapshot status
*/
- private Map snapshotShards(
+ private Map snapshotShards(
final String repositoryName,
final RepositoryData repositoryData,
final CancellableTask task,
final SnapshotInfo snapshotInfo
) throws IOException {
final Repository repository = repositoriesService.repository(repositoryName);
- final Map shardStatus = new HashMap<>();
+ final Map shardStatus = new HashMap<>();
for (String index : snapshotInfo.indices()) {
IndexId indexId = repositoryData.resolveIndexId(index);
task.ensureNotCancelled();
@@ -394,7 +393,7 @@ private Map snapshotShards(
if (shardFailure != null) {
shardStatus.put(shardId, IndexShardSnapshotStatus.newFailed(shardFailure.reason()));
} else {
- final IndexShardSnapshotStatus shardSnapshotStatus;
+ final IndexShardSnapshotStatus.Copy shardSnapshotStatus;
if (snapshotInfo.state() == SnapshotState.FAILED) {
// If the snapshot failed, but the shard's snapshot does
// not have an exception, it means that partial snapshots
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java
index 531dc6dc5eff..8a674292b3cc 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java
@@ -59,7 +59,7 @@ public static class Fields {
* A request to analyze a text associated with a specific index. Allow to provide
* the actual analyzer name to perform the analysis with.
*/
- public static class Request extends SingleShardRequest {
+ public static final class Request extends SingleShardRequest {
private String[] text;
private String analyzer;
@@ -91,7 +91,6 @@ public Request() {}
*
* @param index The text to analyze
*/
- @SuppressWarnings("this-escape")
public Request(String index) {
this.index(index);
}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java
index d2df8e20f99e..444246de7a1b 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java
@@ -163,7 +163,7 @@ public void markShardCopyAsStaleIfNeeded(
}
}
- public static class ShardRequest extends ReplicationRequest {
+ public static final class ShardRequest extends ReplicationRequest {
private final ClusterBlock clusterBlock;
@@ -175,7 +175,6 @@ public static class ShardRequest extends ReplicationRequest {
phase1 = in.readBoolean();
}
- @SuppressWarnings("this-escape")
public ShardRequest(final ShardId shardId, final ClusterBlock clusterBlock, final boolean phase1, final TaskId parentTaskId) {
super(shardId);
this.clusterBlock = Objects.requireNonNull(clusterBlock);
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockAction.java
index aec5718b31a8..eca4f6a1463b 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockAction.java
@@ -157,7 +157,7 @@ public void markShardCopyAsStaleIfNeeded(
}
}
- public static class ShardRequest extends ReplicationRequest {
+ public static final class ShardRequest extends ReplicationRequest {
private final ClusterBlock clusterBlock;
@@ -166,7 +166,6 @@ public static class ShardRequest extends ReplicationRequest {
clusterBlock = new ClusterBlock(in);
}
- @SuppressWarnings("this-escape")
public ShardRequest(final ShardId shardId, final ClusterBlock clusterBlock, final TaskId parentTaskId) {
super(shardId);
this.clusterBlock = Objects.requireNonNull(clusterBlock);
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java
index bccc7a8f7e24..391ac532a0c3 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java
@@ -24,7 +24,7 @@
* The SHARD_LEVEL flags are for stat fields that can be calculated at the shard level and then may be later aggregated at the index level
* along with index-level flag stat fields (e.g., Mappings).
*/
-public class CommonStatsFlags implements Writeable, Cloneable {
+public final class CommonStatsFlags implements Writeable, Cloneable {
public static final CommonStatsFlags ALL = new CommonStatsFlags().all();
public static final CommonStatsFlags SHARD_LEVEL = new CommonStatsFlags().all().set(Flag.Mappings, false);
@@ -40,7 +40,6 @@ public class CommonStatsFlags implements Writeable, Cloneable {
/**
* @param flags flags to set. If no flags are supplied, default flags will be set.
*/
- @SuppressWarnings("this-escape")
public CommonStatsFlags(Flag... flags) {
if (flags.length > 0) {
clear();
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java
index 64505d76e26b..0505f41b2759 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java
@@ -29,7 +29,7 @@
*
* The request requires the query to be set using {@link #query(QueryBuilder)}
*/
-public class ValidateQueryRequest extends BroadcastRequest implements ToXContentObject {
+public final class ValidateQueryRequest extends BroadcastRequest implements ToXContentObject {
public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.fromOptions(false, false, true, false);
@@ -65,7 +65,6 @@ public ValidateQueryRequest(StreamInput in) throws IOException {
* Constructs a new validate request against the provided indices. No indices provided means it will
* run against all indices.
*/
- @SuppressWarnings("this-escape")
public ValidateQueryRequest(String... indices) {
super(indices);
indicesOptions(DEFAULT_INDICES_OPTIONS);
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java
index f3473f274bf3..bd929b9a2204 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java
@@ -24,7 +24,10 @@
import java.io.IOException;
import java.util.Set;
-public class BulkShardRequest extends ReplicatedWriteRequest implements Accountable, RawIndexingDataTransportRequest {
+public final class BulkShardRequest extends ReplicatedWriteRequest
+ implements
+ Accountable,
+ RawIndexingDataTransportRequest {
private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(BulkShardRequest.class);
@@ -35,7 +38,6 @@ public BulkShardRequest(StreamInput in) throws IOException {
items = in.readArray(i -> i.readOptionalWriteable(inpt -> new BulkItemRequest(shardId, inpt)), BulkItemRequest[]::new);
}
- @SuppressWarnings("this-escape")
public BulkShardRequest(ShardId shardId, RefreshPolicy refreshPolicy, BulkItemRequest[] items) {
super(shardId);
this.items = items;
diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java
index 68a4e0d0b04c..4ecb092f34d4 100644
--- a/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java
+++ b/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java
@@ -7,7 +7,6 @@
*/
package org.elasticsearch.action.datastreams;
-import org.elasticsearch.TransportVersions;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.IndicesRequest;
@@ -66,20 +65,14 @@ public ActionRequestValidationException validate() {
public Request(StreamInput in) throws IOException {
super(in);
this.name = in.readString();
- if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) {
- this.startTime = in.readVLong();
- } else {
- this.startTime = System.currentTimeMillis();
- }
+ this.startTime = in.readVLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(name);
- if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) {
- out.writeVLong(startTime);
- }
+ out.writeVLong(startTime);
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java
index 487a5c565399..b6a9179b1e95 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java
@@ -455,7 +455,7 @@ public String toString() {
* and how many of them were skipped and further details in a Map of Cluster objects
* (when doing a cross-cluster search).
*/
- public static class Clusters implements ToXContentFragment, Writeable {
+ public static final class Clusters implements ToXContentFragment, Writeable {
public static final Clusters EMPTY = new Clusters(0, 0, 0);
@@ -538,7 +538,6 @@ public Clusters(int total, int successful, int skipped) {
this.clusterInfo = Collections.emptyMap(); // will never be used if created from this constructor
}
- @SuppressWarnings("this-escape")
public Clusters(StreamInput in) throws IOException {
this.total = in.readVInt();
int successfulTemp = in.readVInt();
diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardOperationFailedException.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardOperationFailedException.java
index 9cfe0a1f1b99..b1594bf5ba93 100644
--- a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardOperationFailedException.java
+++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardOperationFailedException.java
@@ -20,7 +20,7 @@
*
*
*/
-public class BroadcastShardOperationFailedException extends ElasticsearchException implements ElasticsearchWrapperException {
+public final class BroadcastShardOperationFailedException extends ElasticsearchException implements ElasticsearchWrapperException {
public BroadcastShardOperationFailedException(ShardId shardId, String msg) {
this(shardId, msg, null);
@@ -30,7 +30,6 @@ public BroadcastShardOperationFailedException(ShardId shardId, Throwable cause)
this(shardId, "", cause);
}
- @SuppressWarnings("this-escape")
public BroadcastShardOperationFailedException(ShardId shardId, String msg, Throwable cause) {
super(msg, cause);
setShard(shardId);
diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java
index 1f347ec2b8ca..1604ff81603a 100644
--- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java
+++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java
@@ -661,13 +661,11 @@ public interface ReplicaResponse {
}
- public static class RetryOnPrimaryException extends ElasticsearchException {
- @SuppressWarnings("this-escape")
+ public static final class RetryOnPrimaryException extends ElasticsearchException {
public RetryOnPrimaryException(ShardId shardId, String msg) {
this(shardId, msg, null);
}
- @SuppressWarnings("this-escape")
RetryOnPrimaryException(ShardId shardId, String msg, Throwable cause) {
super(msg, cause);
setShard(shardId);
diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
index 411f23a0fc0a..0abe7ad678dc 100644
--- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
@@ -605,9 +605,8 @@ protected Releasable checkReplicaLimits(final ReplicaRequest request) {
return () -> {};
}
- public static class RetryOnReplicaException extends ElasticsearchException {
+ public static final class RetryOnReplicaException extends ElasticsearchException {
- @SuppressWarnings("this-escape")
public RetryOnReplicaException(ShardId shardId, String msg) {
super(msg);
setShard(shardId);
diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java
index 56edc5117a28..650b9db7f3d6 100644
--- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java
@@ -50,7 +50,7 @@
*/
// It's not possible to suppress teh warning at #realtime(boolean) at a method-level.
@SuppressWarnings("unchecked")
-public class TermVectorsRequest extends SingleShardRequest implements RealtimeRequest {
+public final class TermVectorsRequest extends SingleShardRequest implements RealtimeRequest {
private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(TermVectorsRequest.class);
private static final ParseField INDEX = new ParseField("_index");
@@ -79,7 +79,7 @@ public class TermVectorsRequest extends SingleShardRequest i
private long version = Versions.MATCH_ANY;
- protected String preference;
+ private String preference;
private static final AtomicInteger randomInt = new AtomicInteger(0);
@@ -204,7 +204,6 @@ public TermVectorsRequest(TermVectorsRequest other) {
this.filterSettings = other.filterSettings();
}
- @SuppressWarnings("this-escape")
public TermVectorsRequest(MultiGetRequest.Item item) {
super(item.index());
this.id = item.id();
diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java
index 628d2bce2815..eb2c2b7f6738 100644
--- a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java
+++ b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java
@@ -174,7 +174,7 @@ private static void initPhase2(Bootstrap bootstrap) throws IOException {
// initialize probes before the security manager is installed
initializeProbes();
- Runtime.getRuntime().addShutdownHook(new Thread(Elasticsearch::shutdown));
+ Runtime.getRuntime().addShutdownHook(new Thread(Elasticsearch::shutdown, "elasticsearch-shutdown"));
// look for jar hell
final Logger logger = LogManager.getLogger(JarHell.class);
@@ -376,7 +376,7 @@ private static void startCliMonitorThread(InputStream stdin) {
Bootstrap.exit(1);
}
}
- }).start();
+ }, "elasticsearch-cli-monitor-thread").start();
}
/**
diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
index 8bcb6a28fb50..0f84ecab5f8b 100644
--- a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
+++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
@@ -841,9 +841,8 @@ public String toString() {
}
}
- public static class NoLongerPrimaryShardException extends ElasticsearchException {
+ public static final class NoLongerPrimaryShardException extends ElasticsearchException {
- @SuppressWarnings("this-escape")
public NoLongerPrimaryShardException(ShardId shardId, String msg) {
super(msg);
setShard(shardId);
diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java b/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java
index 86e5d6739fcb..feb0543aad62 100644
--- a/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java
+++ b/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java
@@ -61,7 +61,7 @@
* considering a follower to be faulty, to allow for a brief network partition or a long GC cycle to occur without triggering the removal of
* a node and the consequent shard reallocation.
*/
-public class FollowersChecker {
+public final class FollowersChecker {
private static final Logger logger = LogManager.getLogger(FollowersChecker.class);
@@ -105,7 +105,6 @@ public class FollowersChecker {
private final NodeHealthService nodeHealthService;
private volatile FastResponseState fastResponseState;
- @SuppressWarnings("this-escape")
public FollowersChecker(
Settings settings,
TransportService transportService,
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java
index 7a40d7fd774d..35b7d957bf07 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java
@@ -224,7 +224,7 @@ public String toString() {
}
}
- public static class Builder {
+ public static final class Builder {
private static final Set VALID_FIELDS = Set.of("order", "mappings", "settings", "index_patterns", "aliases", "version");
@@ -248,7 +248,6 @@ public Builder(String name) {
aliases = new HashMap<>();
}
- @SuppressWarnings("this-escape")
public Builder(IndexTemplateMetadata indexTemplateMetadata) {
this.name = indexTemplateMetadata.name();
order(indexTemplateMetadata.order());
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterBalanceStats.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterBalanceStats.java
index 853a26263fe9..5df5de43cffd 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterBalanceStats.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterBalanceStats.java
@@ -31,15 +31,18 @@
import java.util.Map;
import java.util.function.ToDoubleFunction;
-public record ClusterBalanceStats(Map tiers, Map nodes)
- implements
- Writeable,
- ToXContentObject {
+public record ClusterBalanceStats(
+ int shards,
+ int undesiredShardAllocations,
+ Map tiers,
+ Map nodes
+) implements Writeable, ToXContentObject {
- public static ClusterBalanceStats EMPTY = new ClusterBalanceStats(Map.of(), Map.of());
+ public static ClusterBalanceStats EMPTY = new ClusterBalanceStats(0, 0, Map.of(), Map.of());
public static ClusterBalanceStats createFrom(
ClusterState clusterState,
+ DesiredBalance desiredBalance,
ClusterInfo clusterInfo,
WriteLoadForecaster writeLoadForecaster
) {
@@ -50,32 +53,60 @@ public static ClusterBalanceStats createFrom(
if (dataRoles.isEmpty()) {
continue;
}
- var nodeStats = NodeBalanceStats.createFrom(routingNode, clusterState.metadata(), clusterInfo, writeLoadForecaster);
+ var nodeStats = NodeBalanceStats.createFrom(
+ routingNode,
+ clusterState.metadata(),
+ desiredBalance,
+ clusterInfo,
+ writeLoadForecaster
+ );
nodes.put(routingNode.node().getName(), nodeStats);
for (DiscoveryNodeRole role : dataRoles) {
tierToNodeStats.computeIfAbsent(role.roleName(), ignored -> new ArrayList<>()).add(nodeStats);
}
}
- return new ClusterBalanceStats(Maps.transformValues(tierToNodeStats, TierBalanceStats::createFrom), nodes);
+ return new ClusterBalanceStats(
+ nodes.values().stream().mapToInt(NodeBalanceStats::shards).sum(),
+ nodes.values().stream().mapToInt(NodeBalanceStats::undesiredShardAllocations).sum(),
+ Maps.transformValues(tierToNodeStats, TierBalanceStats::createFrom),
+ nodes
+ );
}
public static ClusterBalanceStats readFrom(StreamInput in) throws IOException {
- return new ClusterBalanceStats(in.readImmutableMap(TierBalanceStats::readFrom), in.readImmutableMap(NodeBalanceStats::readFrom));
+ return new ClusterBalanceStats(
+ in.getTransportVersion().onOrAfter(TransportVersions.UNDESIRED_SHARD_ALLOCATIONS_COUNT_ADDED) ? in.readVInt() : -1,
+ in.getTransportVersion().onOrAfter(TransportVersions.UNDESIRED_SHARD_ALLOCATIONS_COUNT_ADDED) ? in.readVInt() : -1,
+ in.readImmutableMap(TierBalanceStats::readFrom),
+ in.readImmutableMap(NodeBalanceStats::readFrom)
+ );
}
@Override
public void writeTo(StreamOutput out) throws IOException {
+ if (out.getTransportVersion().onOrAfter(TransportVersions.UNDESIRED_SHARD_ALLOCATIONS_COUNT_ADDED)) {
+ out.writeVInt(shards);
+ }
+ if (out.getTransportVersion().onOrAfter(TransportVersions.UNDESIRED_SHARD_ALLOCATIONS_COUNT_ADDED)) {
+ out.writeVInt(undesiredShardAllocations);
+ }
out.writeMap(tiers, StreamOutput::writeWriteable);
out.writeMap(nodes, StreamOutput::writeWriteable);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
- return builder.startObject().field("tiers").map(tiers).field("nodes").map(nodes).endObject();
+ return builder.startObject()
+ .field("shard_count", shards)
+ .field("undesired_shard_allocation_count", undesiredShardAllocations)
+ .field("tiers", tiers)
+ .field("nodes", nodes)
+ .endObject();
}
public record TierBalanceStats(
MetricStats shardCount,
+ MetricStats undesiredShardAllocations,
MetricStats forecastWriteLoad,
MetricStats forecastShardSize,
MetricStats actualShardSize
@@ -84,6 +115,7 @@ public record TierBalanceStats(
private static TierBalanceStats createFrom(List nodes) {
return new TierBalanceStats(
MetricStats.createFrom(nodes, it -> it.shards),
+ MetricStats.createFrom(nodes, it -> it.undesiredShardAllocations),
MetricStats.createFrom(nodes, it -> it.forecastWriteLoad),
MetricStats.createFrom(nodes, it -> it.forecastShardSize),
MetricStats.createFrom(nodes, it -> it.actualShardSize)
@@ -93,6 +125,9 @@ private static TierBalanceStats createFrom(List nodes) {
public static TierBalanceStats readFrom(StreamInput in) throws IOException {
return new TierBalanceStats(
MetricStats.readFrom(in),
+ in.getTransportVersion().onOrAfter(TransportVersions.UNDESIRED_SHARD_ALLOCATIONS_COUNT_ADDED)
+ ? MetricStats.readFrom(in)
+ : new MetricStats(0.0, 0.0, 0.0, 0.0, 0.0),
MetricStats.readFrom(in),
MetricStats.readFrom(in),
MetricStats.readFrom(in)
@@ -102,6 +137,9 @@ public static TierBalanceStats readFrom(StreamInput in) throws IOException {
@Override
public void writeTo(StreamOutput out) throws IOException {
shardCount.writeTo(out);
+ if (out.getTransportVersion().onOrAfter(TransportVersions.UNDESIRED_SHARD_ALLOCATIONS_COUNT_ADDED)) {
+ undesiredShardAllocations.writeTo(out);
+ }
forecastWriteLoad.writeTo(out);
forecastShardSize.writeTo(out);
actualShardSize.writeTo(out);
@@ -111,6 +149,7 @@ public void writeTo(StreamOutput out) throws IOException {
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return builder.startObject()
.field("shard_count", shardCount)
+ .field("undesired_shard_allocation_count", undesiredShardAllocations)
.field("forecast_write_load", forecastWriteLoad)
.field("forecast_disk_usage", forecastShardSize)
.field("actual_disk_usage", actualShardSize)
@@ -172,6 +211,7 @@ public record NodeBalanceStats(
String nodeId,
List roles,
int shards,
+ int undesiredShardAllocations,
double forecastWriteLoad,
long forecastShardSize,
long actualShardSize
@@ -182,9 +222,11 @@ public record NodeBalanceStats(
private static NodeBalanceStats createFrom(
RoutingNode routingNode,
Metadata metadata,
+ DesiredBalance desiredBalance,
ClusterInfo clusterInfo,
WriteLoadForecaster writeLoadForecaster
) {
+ int undesired = 0;
double forecastWriteLoad = 0.0;
long forecastShardSize = 0L;
long actualShardSize = 0L;
@@ -196,23 +238,37 @@ private static NodeBalanceStats createFrom(
forecastWriteLoad += writeLoadForecaster.getForecastedWriteLoad(indexMetadata).orElse(0.0);
forecastShardSize += indexMetadata.getForecastedShardSizeInBytes().orElse(shardSize);
actualShardSize += shardSize;
+ if (isDesiredShardAllocation(shardRouting, desiredBalance) == false) {
+ undesired++;
+ }
}
return new NodeBalanceStats(
routingNode.nodeId(),
routingNode.node().getRoles().stream().map(DiscoveryNodeRole::roleName).toList(),
routingNode.size(),
+ undesired,
forecastWriteLoad,
forecastShardSize,
actualShardSize
);
}
+ private static boolean isDesiredShardAllocation(ShardRouting shardRouting, DesiredBalance desiredBalance) {
+ if (shardRouting.relocating()) {
+ // relocating out shards are temporarily accepted
+ return true;
+ }
+ var assignment = desiredBalance.getAssignment(shardRouting.shardId());
+ return assignment != null && assignment.nodeIds().contains(shardRouting.currentNodeId());
+ }
+
public static NodeBalanceStats readFrom(StreamInput in) throws IOException {
return new NodeBalanceStats(
in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0) ? in.readString() : UNKNOWN_NODE_ID,
in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0) ? in.readStringCollectionAsList() : List.of(),
in.readInt(),
+ in.getTransportVersion().onOrAfter(TransportVersions.UNDESIRED_SHARD_ALLOCATIONS_COUNT_ADDED) ? in.readVInt() : -1,
in.readDouble(),
in.readLong(),
in.readLong()
@@ -228,6 +284,9 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeStringCollection(roles);
}
out.writeInt(shards);
+ if (out.getTransportVersion().onOrAfter(TransportVersions.UNDESIRED_SHARD_ALLOCATIONS_COUNT_ADDED)) {
+ out.writeVInt(undesiredShardAllocations);
+ }
out.writeDouble(forecastWriteLoad);
out.writeLong(forecastShardSize);
out.writeLong(actualShardSize);
@@ -241,6 +300,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
}
return builder.field("roles", roles)
.field("shard_count", shards)
+ .field("undesired_shard_allocation_count", undesiredShardAllocations)
.field("forecast_write_load", forecastWriteLoad)
.humanReadableField("forecast_disk_usage_bytes", "forecast_disk_usage", ByteSizeValue.ofBytes(forecastShardSize))
.humanReadableField("actual_disk_usage_bytes", "actual_disk_usage", ByteSizeValue.ofBytes(actualShardSize))
diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoPoint.java b/server/src/main/java/org/elasticsearch/common/geo/GeoPoint.java
index e6ed24dc7220..74de0b2e03e6 100644
--- a/server/src/main/java/org/elasticsearch/common/geo/GeoPoint.java
+++ b/server/src/main/java/org/elasticsearch/common/geo/GeoPoint.java
@@ -25,10 +25,10 @@
import java.io.IOException;
import java.util.Locale;
-public class GeoPoint implements SpatialPoint, ToXContentFragment {
+public final class GeoPoint implements SpatialPoint, ToXContentFragment {
- protected double lat;
- protected double lon;
+ private double lat;
+ private double lon;
public GeoPoint() {}
@@ -38,7 +38,6 @@ public GeoPoint() {}
*
* @param value String to create the point from
*/
- @SuppressWarnings("this-escape")
public GeoPoint(String value) {
this.resetFromString(value);
}
diff --git a/server/src/main/java/org/elasticsearch/common/inject/CreationException.java b/server/src/main/java/org/elasticsearch/common/inject/CreationException.java
index f09248de947e..78f89e95e5ff 100644
--- a/server/src/main/java/org/elasticsearch/common/inject/CreationException.java
+++ b/server/src/main/java/org/elasticsearch/common/inject/CreationException.java
@@ -27,13 +27,12 @@
*
* @author crazybob@google.com (Bob Lee)
*/
-public class CreationException extends RuntimeException {
+public final class CreationException extends RuntimeException {
private final Collection messages;
/**
* Creates a CreationException containing {@code messages}.
*/
- @SuppressWarnings("this-escape")
public CreationException(Collection messages) {
this.messages = messages;
if (this.messages.isEmpty()) {
diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java
index c7e9a4abf2c5..478ae231e16f 100644
--- a/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java
+++ b/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java
@@ -17,18 +17,16 @@
* Resettable {@link StreamInput} that wraps a byte array. It is heavily inspired in Lucene's
* {@link org.apache.lucene.store.ByteArrayDataInput}.
*/
-public class ByteArrayStreamInput extends StreamInput {
+public final class ByteArrayStreamInput extends StreamInput {
private byte[] bytes;
private int pos;
private int limit;
- @SuppressWarnings("this-escape")
public ByteArrayStreamInput() {
reset(BytesRef.EMPTY_BYTES);
}
- @SuppressWarnings("this-escape")
public ByteArrayStreamInput(byte[] bytes) {
reset(bytes);
}
diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/VersionCheckingStreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/VersionCheckingStreamOutput.java
index 6a02bedcdf08..42fb7f4a6afe 100644
--- a/server/src/main/java/org/elasticsearch/common/io/stream/VersionCheckingStreamOutput.java
+++ b/server/src/main/java/org/elasticsearch/common/io/stream/VersionCheckingStreamOutput.java
@@ -17,9 +17,8 @@
* This {@link StreamOutput} writes nowhere. It can be used to check if serialization would
* be successful writing to a specific version.
*/
-public class VersionCheckingStreamOutput extends StreamOutput {
+public final class VersionCheckingStreamOutput extends StreamOutput {
- @SuppressWarnings("this-escape")
public VersionCheckingStreamOutput(TransportVersion version) {
setTransportVersion(version);
}
diff --git a/server/src/main/java/org/elasticsearch/common/logging/ECSJsonLayout.java b/server/src/main/java/org/elasticsearch/common/logging/ECSJsonLayout.java
index 54b5749b797f..93ca7a9615be 100644
--- a/server/src/main/java/org/elasticsearch/common/logging/ECSJsonLayout.java
+++ b/server/src/main/java/org/elasticsearch/common/logging/ECSJsonLayout.java
@@ -32,14 +32,13 @@ public static ECSJsonLayout.Builder newBuilder() {
return new ECSJsonLayout.Builder().asBuilder();
}
- public static class Builder extends AbstractStringLayout.Builder
+ public static final class Builder extends AbstractStringLayout.Builder
implements
org.apache.logging.log4j.core.util.Builder {
@PluginAttribute("dataset")
String dataset;
- @SuppressWarnings("this-escape")
public Builder() {
setCharset(StandardCharsets.UTF_8);
}
diff --git a/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java
index fb7475e3cba5..a5272b8074d7 100644
--- a/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java
+++ b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java
@@ -147,7 +147,7 @@ PatternLayout getPatternLayout() {
return patternLayout;
}
- public static class Builder> extends AbstractStringLayout.Builder
+ public static final class Builder> extends AbstractStringLayout.Builder
implements
org.apache.logging.log4j.core.util.Builder {
@@ -163,7 +163,6 @@ public static class Builder> extends AbstractS
@PluginConfiguration
private Configuration config;
- @SuppressWarnings("this-escape")
public Builder() {
setCharset(StandardCharsets.UTF_8);
}
diff --git a/server/src/main/java/org/elasticsearch/common/metrics/Counters.java b/server/src/main/java/org/elasticsearch/common/metrics/Counters.java
index 665ed371955c..9606fc768759 100644
--- a/server/src/main/java/org/elasticsearch/common/metrics/Counters.java
+++ b/server/src/main/java/org/elasticsearch/common/metrics/Counters.java
@@ -28,11 +28,10 @@
* that will not have conflicts, which means that there no counter will have a label which is a substring of the label of another counter.
* For example, the counters `foo: 1` and `foo.bar: 3` cannot co-exist in a nested map.
*/
-public class Counters implements Writeable {
+public final class Counters implements Writeable {
private final ConcurrentMap counters = new ConcurrentHashMap<>();
- @SuppressWarnings("this-escape")
public Counters(StreamInput in) throws IOException {
int numCounters = in.readVInt();
for (int i = 0; i < numCounters; i++) {
diff --git a/server/src/main/java/org/elasticsearch/common/settings/LocallyMountedSecrets.java b/server/src/main/java/org/elasticsearch/common/settings/LocallyMountedSecrets.java
index 1ac3db3827eb..b3639079cc92 100644
--- a/server/src/main/java/org/elasticsearch/common/settings/LocallyMountedSecrets.java
+++ b/server/src/main/java/org/elasticsearch/common/settings/LocallyMountedSecrets.java
@@ -65,7 +65,7 @@
* }
* }
*/
-public class LocallyMountedSecrets implements SecureSettings {
+public final class LocallyMountedSecrets implements SecureSettings {
public static final String SECRETS_FILE_NAME = "secrets.json";
public static final String SECRETS_DIRECTORY = "secrets";
@@ -116,7 +116,6 @@ public class LocallyMountedSecrets implements SecureSettings {
/**
* Direct constructor to be used by the CLI
*/
- @SuppressWarnings("this-escape")
public LocallyMountedSecrets(Environment environment) {
var secretsDirPath = resolveSecretsDir(environment);
var secretsFilePath = resolveSecretsFile(environment);
diff --git a/server/src/main/java/org/elasticsearch/common/util/BytesRefArray.java b/server/src/main/java/org/elasticsearch/common/util/BytesRefArray.java
index 91dbfc30123f..c78db448380b 100644
--- a/server/src/main/java/org/elasticsearch/common/util/BytesRefArray.java
+++ b/server/src/main/java/org/elasticsearch/common/util/BytesRefArray.java
@@ -22,7 +22,7 @@
/**
* Compact serializable container for ByteRefs
*/
-public class BytesRefArray implements Accountable, Releasable, Writeable {
+public final class BytesRefArray implements Accountable, Releasable, Writeable {
// base size of the bytes ref array
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(BytesRefArray.class);
@@ -32,7 +32,6 @@ public class BytesRefArray implements Accountable, Releasable, Writeable {
private ByteArray bytes;
private long size;
- @SuppressWarnings("this-escape")
public BytesRefArray(long capacity, BigArrays bigArrays) {
this.bigArrays = bigArrays;
boolean success = false;
@@ -49,7 +48,6 @@ public BytesRefArray(long capacity, BigArrays bigArrays) {
size = 0;
}
- @SuppressWarnings("this-escape")
public BytesRefArray(StreamInput in, BigArrays bigArrays) throws IOException {
this.bigArrays = bigArrays;
// we allocate big arrays so we have to `close` if we fail here or we'll leak them.
diff --git a/server/src/main/java/org/elasticsearch/common/util/LongObjectPagedHashMap.java b/server/src/main/java/org/elasticsearch/common/util/LongObjectPagedHashMap.java
index f54500a806cc..860060ca5a34 100644
--- a/server/src/main/java/org/elasticsearch/common/util/LongObjectPagedHashMap.java
+++ b/server/src/main/java/org/elasticsearch/common/util/LongObjectPagedHashMap.java
@@ -17,7 +17,7 @@
* A hash table from native longs to objects. This implementation resolves collisions
* using open-addressing and does not support null values. This class is not thread-safe.
*/
-public class LongObjectPagedHashMap extends AbstractPagedHashMap implements Iterable> {
+public final class LongObjectPagedHashMap extends AbstractPagedHashMap implements Iterable> {
private LongArray keys;
private ObjectArray values;
@@ -26,7 +26,6 @@ public LongObjectPagedHashMap(long capacity, BigArrays bigArrays) {
this(capacity, DEFAULT_MAX_LOAD_FACTOR, bigArrays);
}
- @SuppressWarnings("this-escape")
public LongObjectPagedHashMap(long capacity, float maxLoadFactor, BigArrays bigArrays) {
super(capacity, maxLoadFactor, bigArrays);
boolean success = false;
diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java
index cc685b26ce23..0380bb80e001 100644
--- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java
+++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java
@@ -199,7 +199,7 @@ public String toString() {
*/
static final String SEARCHABLE_SHARED_CACHE_FILE = "shared_snapshot_cache";
- public static class NodeLock implements Releasable {
+ public static final class NodeLock implements Releasable {
private final Lock[] locks;
private final DataPath[] dataPaths;
@@ -213,7 +213,6 @@ public NodeLock(final Logger logger, final Environment environment, final Checke
* Tries to acquire a node lock for a node id, throws {@code IOException} if it is unable to acquire it
* @param pathFunction function to check node path before attempt of acquiring a node lock
*/
- @SuppressWarnings("this-escape")
public NodeLock(
final Logger logger,
final Environment environment,
@@ -990,7 +989,7 @@ private final class InternalShardLock {
lockDetails = Tuple.tuple(System.nanoTime(), details);
}
- protected void release() {
+ private void release() {
mutex.release();
decWaitCount();
}
diff --git a/server/src/main/java/org/elasticsearch/env/NodeMetadata.java b/server/src/main/java/org/elasticsearch/env/NodeMetadata.java
index a714ee4cf5ec..77415bbaea94 100644
--- a/server/src/main/java/org/elasticsearch/env/NodeMetadata.java
+++ b/server/src/main/java/org/elasticsearch/env/NodeMetadata.java
@@ -114,7 +114,7 @@ public void verifyUpgradeToCurrentVersion() {
assert (nodeVersion.equals(Version.V_EMPTY) == false) || (Version.CURRENT.major <= Version.V_7_0_0.major + 1)
: "version is required in the node metadata from v9 onwards";
- if (NodeMetadata.isNodeVersionWireCompatible(nodeVersion.toString()) == false) {
+ if (nodeVersion.before(Version.CURRENT.minimumCompatibilityVersion())) {
throw new IllegalStateException(
"cannot upgrade a node from version ["
+ nodeVersion
@@ -222,20 +222,4 @@ public NodeMetadata fromXContent(XContentParser parser) throws IOException {
public static final MetadataStateFormat<NodeMetadata> FORMAT = new NodeMetadataStateFormat(false);
- /**
- * Check whether a node version is compatible with the current minimum transport version.
- * @param version A version identifier as a string
- * @throws IllegalArgumentException if version is not a valid transport version identifier
- * @return true if the version is compatible, false otherwise
- */
- // visible for testing
- static boolean isNodeVersionWireCompatible(String version) {
- try {
- Version esVersion = Version.fromString(version);
- return esVersion.onOrAfter(Version.CURRENT.minimumCompatibilityVersion());
- } catch (IllegalArgumentException e) {
- throw new IllegalArgumentException("Cannot parse [" + version + "] as a transport version identifier", e);
- }
- }
-
}
diff --git a/server/src/main/java/org/elasticsearch/env/ShardLockObtainFailedException.java b/server/src/main/java/org/elasticsearch/env/ShardLockObtainFailedException.java
index 001b35ab11cb..4aab8b91a073 100644
--- a/server/src/main/java/org/elasticsearch/env/ShardLockObtainFailedException.java
+++ b/server/src/main/java/org/elasticsearch/env/ShardLockObtainFailedException.java
@@ -17,15 +17,13 @@
/**
* Exception used when the in-memory lock for a shard cannot be obtained
*/
-public class ShardLockObtainFailedException extends ElasticsearchException {
+public final class ShardLockObtainFailedException extends ElasticsearchException {
- @SuppressWarnings("this-escape")
public ShardLockObtainFailedException(ShardId shardId, String message) {
super(buildMessage(shardId, message));
this.setShard(shardId);
}
- @SuppressWarnings("this-escape")
public ShardLockObtainFailedException(ShardId shardId, String message, Throwable cause) {
super(buildMessage(shardId, message), cause);
this.setShard(shardId);
diff --git a/server/src/main/java/org/elasticsearch/health/RestGetHealthAction.java b/server/src/main/java/org/elasticsearch/health/RestGetHealthAction.java
index dd3176cf912a..8dcea1bb0e7e 100644
--- a/server/src/main/java/org/elasticsearch/health/RestGetHealthAction.java
+++ b/server/src/main/java/org/elasticsearch/health/RestGetHealthAction.java
@@ -51,4 +51,9 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli
new RestChunkedToXContentListener<>(channel)
);
}
+
+ @Override
+ public boolean canTripCircuitBreaker() {
+ return false;
+ }
}
diff --git a/server/src/main/java/org/elasticsearch/health/node/action/TransportHealthNodeAction.java b/server/src/main/java/org/elasticsearch/health/node/action/TransportHealthNodeAction.java
index 18c009deb884..f25168bd4e98 100644
--- a/server/src/main/java/org/elasticsearch/health/node/action/TransportHealthNodeAction.java
+++ b/server/src/main/java/org/elasticsearch/health/node/action/TransportHealthNodeAction.java
@@ -74,7 +74,7 @@ protected TransportHealthNodeAction(
Writeable.Reader response,
Executor executor
) {
- super(actionName, true, transportService, actionFilters, request, EsExecutors.DIRECT_EXECUTOR_SERVICE);
+ super(actionName, false, transportService, actionFilters, request, EsExecutors.DIRECT_EXECUTOR_SERVICE);
this.transportService = transportService;
this.clusterService = clusterService;
this.threadPool = threadPool;
diff --git a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java
index df1aca3dc7b5..ee2cb06cb955 100644
--- a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java
+++ b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java
@@ -37,7 +37,7 @@
* per index in real time via the mapping API. If no specific postings format or vector format is
* configured for a specific field the default postings or vector format is used.
*/
-public class PerFieldMapperCodec extends Lucene95Codec {
+public final class PerFieldMapperCodec extends Lucene95Codec {
private final MapperService mapperService;
private final DocValuesFormat docValuesFormat = new Lucene90DocValuesFormat();
@@ -49,7 +49,6 @@ public class PerFieldMapperCodec extends Lucene95Codec {
: "PerFieldMapperCodec must subclass the latest lucene codec: " + Lucene.LATEST_CODEC;
}
- @SuppressWarnings("this-escape")
public PerFieldMapperCodec(Mode compressionMode, MapperService mapperService, BigArrays bigArrays) {
super(compressionMode);
this.mapperService = mapperService;
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java
index 0457a23d8510..403156c95540 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java
@@ -191,11 +191,10 @@ protected String contentType() {
return CONTENT_TYPE;
}
- public static class CustomBinaryDocValuesField extends CustomDocValuesField {
+ public static final class CustomBinaryDocValuesField extends CustomDocValuesField {
private final List<byte[]> bytesList;
- @SuppressWarnings("this-escape")
public CustomBinaryDocValuesField(String name, byte[] bytes) {
super(name);
bytesList = new ArrayList<>();
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java
index 1fb3f706c56a..a5793df3b82e 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java
@@ -68,7 +68,7 @@ private static BooleanFieldMapper toType(FieldMapper in) {
return (BooleanFieldMapper) in;
}
- public static class Builder extends FieldMapper.Builder {
+ public static final class Builder extends FieldMapper.Builder {
private final Parameter<Boolean> docValues = Parameter.docValuesParam(m -> toType(m).hasDocValues, true);
private final Parameter<Boolean> indexed = Parameter.indexParam(m -> toType(m).indexed, true);
@@ -93,7 +93,6 @@ public static class Builder extends FieldMapper.Builder {
private final IndexVersion indexCreatedVersion;
- @SuppressWarnings("this-escape")
public Builder(String name, ScriptCompiler scriptCompiler, boolean ignoreMalformedByDefault, IndexVersion indexCreatedVersion) {
super(name);
this.scriptCompiler = Objects.requireNonNull(scriptCompiler);
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java
index 21b9ec04c56c..9d12fc6910d6 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java
@@ -223,7 +223,7 @@ private static DateFieldMapper toType(FieldMapper in) {
return (DateFieldMapper) in;
}
- public static class Builder extends FieldMapper.Builder {
+ public static final class Builder extends FieldMapper.Builder {
private final Parameter<Boolean> index = Parameter.indexParam(m -> toType(m).indexed, true);
private final Parameter<Boolean> docValues = Parameter.docValuesParam(m -> toType(m).hasDocValues, true);
@@ -253,7 +253,6 @@ public static class Builder extends FieldMapper.Builder {
private final IndexVersion indexCreatedVersion;
private final ScriptCompiler scriptCompiler;
- @SuppressWarnings("this-escape")
public Builder(
String name,
Resolution resolution,
@@ -396,11 +395,11 @@ public DateFieldMapper build(MapperBuilderContext context) {
}, MINIMUM_COMPATIBILITY_VERSION);
public static final class DateFieldType extends MappedFieldType {
- protected final DateFormatter dateTimeFormatter;
- protected final DateMathParser dateMathParser;
- protected final Resolution resolution;
- protected final String nullValue;
- protected final FieldValues<Long> scriptValues;
+ final DateFormatter dateTimeFormatter;
+ final DateMathParser dateMathParser;
+ private final Resolution resolution;
+ private final String nullValue;
+ private final FieldValues<Long> scriptValues;
private final boolean pointsMetadataAvailable;
public DateFieldType(
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java
index f97817570838..10e24fbeebb8 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java
@@ -76,7 +76,7 @@ private static GeoPointFieldMapper toType(FieldMapper in) {
return (GeoPointFieldMapper) in;
}
- public static class Builder extends FieldMapper.Builder {
+ public static final class Builder extends FieldMapper.Builder {
final Parameter<Explicit<Boolean>> ignoreMalformed;
final Parameter<Explicit<Boolean>> ignoreZValue = ignoreZValueParam(m -> builder(m).ignoreZValue.get());
@@ -94,7 +94,6 @@ public static class Builder extends FieldMapper.Builder {
private final Parameter dimension; // can only support time_series_dimension: false
private final IndexMode indexMode; // either STANDARD or TIME_SERIES
- @SuppressWarnings("this-escape")
public Builder(
String name,
ScriptCompiler scriptCompiler,
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java
index 7d6b7711360f..80fd384f15fb 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java
@@ -69,7 +69,7 @@ private static IpFieldMapper toType(FieldMapper in) {
return (IpFieldMapper) in;
}
- public static class Builder extends FieldMapper.Builder {
+ public static final class Builder extends FieldMapper.Builder {
private final Parameter<Boolean> indexed = Parameter.indexParam(m -> toType(m).indexed, true);
private final Parameter<Boolean> hasDocValues = Parameter.docValuesParam(m -> toType(m).hasDocValues, true);
@@ -89,7 +89,6 @@ public static class Builder extends FieldMapper.Builder {
private final IndexVersion indexCreatedVersion;
private final ScriptCompiler scriptCompiler;
- @SuppressWarnings("this-escape")
public Builder(String name, ScriptCompiler scriptCompiler, boolean ignoreMalformedByDefault, IndexVersion indexCreatedVersion) {
super(name);
this.scriptCompiler = Objects.requireNonNull(scriptCompiler);
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java
index 9bc3db22365d..f15bb0069570 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java
@@ -137,7 +137,7 @@ private static KeywordFieldMapper toType(FieldMapper in) {
return (KeywordFieldMapper) in;
}
- public static class Builder extends FieldMapper.Builder {
+ public static final class Builder extends FieldMapper.Builder {
private final Parameter<Boolean> indexed = Parameter.indexParam(m -> toType(m).indexed, true);
private final Parameter<Boolean> hasDocValues = Parameter.docValuesParam(m -> toType(m).hasDocValues, true);
@@ -184,7 +184,6 @@ public static class Builder extends FieldMapper.Builder {
private final ScriptCompiler scriptCompiler;
private final IndexVersion indexCreatedVersion;
- @SuppressWarnings("this-escape")
public Builder(String name, IndexAnalyzers indexAnalyzers, ScriptCompiler scriptCompiler, IndexVersion indexCreatedVersion) {
super(name);
this.indexAnalyzers = indexAnalyzers;
@@ -1008,7 +1007,7 @@ public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() {
return syntheticFieldLoader(simpleName());
}
- protected SourceLoader.SyntheticFieldLoader syntheticFieldLoader(String simpleName) {
+ SourceLoader.SyntheticFieldLoader syntheticFieldLoader(String simpleName) {
if (hasScript()) {
return SourceLoader.SyntheticFieldLoader.NOTHING;
}
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java
index 7dfc5a98037d..84e9e84fb8ce 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java
@@ -89,7 +89,7 @@ private static NumberFieldMapper toType(FieldMapper in) {
private static final IndexVersion MINIMUM_COMPATIBILITY_VERSION = IndexVersion.fromId(5000099);
- public static class Builder extends FieldMapper.Builder {
+ public static final class Builder extends FieldMapper.Builder {
private final Parameter<Boolean> indexed;
private final Parameter<Boolean> hasDocValues = Parameter.docValuesParam(m -> toType(m).hasDocValues, true);
@@ -143,7 +143,6 @@ public static Builder docValuesOnly(String name, NumberType type, IndexVersion i
return builder;
}
- @SuppressWarnings("this-escape")
public Builder(
String name,
NumberType type,
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java
index 91616041f65f..1949249b9be2 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java
@@ -87,7 +87,7 @@
import static org.elasticsearch.search.SearchService.ALLOW_EXPENSIVE_QUERIES;
/** A {@link FieldMapper} for full-text fields. */
-public class TextFieldMapper extends FieldMapper {
+public final class TextFieldMapper extends FieldMapper {
public static final String CONTENT_TYPE = "text";
private static final String FAST_PHRASE_SUFFIX = "._index_phrase";
@@ -1155,8 +1155,7 @@ public Query existsQuery(SearchExecutionContext context) {
private final SubFieldInfo prefixFieldInfo;
private final SubFieldInfo phraseFieldInfo;
- @SuppressWarnings("this-escape")
- protected TextFieldMapper(
+ private TextFieldMapper(
String simpleName,
FieldType fieldType,
TextFieldType mappedFieldType,
diff --git a/server/src/main/java/org/elasticsearch/index/query/CombinedFieldsQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/CombinedFieldsQueryBuilder.java
index 2533b5b61106..b869096c12fb 100644
--- a/server/src/main/java/org/elasticsearch/index/query/CombinedFieldsQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/CombinedFieldsQueryBuilder.java
@@ -50,7 +50,7 @@
* A query that matches on multiple text fields, as if the field contents had been indexed
* into a single combined field.
*/
-public class CombinedFieldsQueryBuilder extends AbstractQueryBuilder<CombinedFieldsQueryBuilder> {
+public final class CombinedFieldsQueryBuilder extends AbstractQueryBuilder<CombinedFieldsQueryBuilder> {
public static final String NAME = "combined_fields";
private static final ParseField QUERY_FIELD = new ParseField("query");
@@ -109,7 +109,6 @@ public class CombinedFieldsQueryBuilder extends AbstractQueryBuilder<CombinedFieldsQueryBuilder> {
+public final class MultiMatchQueryBuilder extends AbstractQueryBuilder<MultiMatchQueryBuilder> {
public static final String NAME = "multi_match";
private static final String CUTOFF_FREQUENCY_DEPRECATION_MSG = "cutoff_freqency is not supported."
@@ -185,7 +185,6 @@ public MultiMatchQueryBuilder.Type getType() {
/**
* Constructs a new text query.
*/
- @SuppressWarnings("this-escape")
public MultiMatchQueryBuilder(Object value, String... fields) {
if (value == null) {
throw new IllegalArgumentException("[" + NAME + "] requires query value");
@@ -203,7 +202,6 @@ public MultiMatchQueryBuilder(Object value, String... fields) {
/**
* Read from a stream.
*/
- @SuppressWarnings("this-escape")
public MultiMatchQueryBuilder(StreamInput in) throws IOException {
super(in);
value = in.readGenericValue();
diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryShardException.java b/server/src/main/java/org/elasticsearch/index/query/QueryShardException.java
index 0b0b35b61953..fd290e56f7e4 100644
--- a/server/src/main/java/org/elasticsearch/index/query/QueryShardException.java
+++ b/server/src/main/java/org/elasticsearch/index/query/QueryShardException.java
@@ -18,7 +18,7 @@
/**
* Exception that is thrown when creating lucene queries on the shard
*/
-public class QueryShardException extends ElasticsearchException {
+public final class QueryShardException extends ElasticsearchException {
public QueryShardException(QueryRewriteContext context, String msg, Object... args) {
this(context, msg, null, args);
@@ -32,7 +32,6 @@ public QueryShardException(QueryRewriteContext context, String msg, Throwable ca
* This constructor is provided for use in unit tests where a
* {@link SearchExecutionContext} may not be available
*/
- @SuppressWarnings("this-escape")
public QueryShardException(Index index, String msg, Throwable cause, Object... args) {
super(msg, cause, args);
setIndex(index);
diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java
index 195e1d51c806..38ca88e8a937 100644
--- a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java
@@ -45,7 +45,7 @@
* (using {@link #field(String)}), will run the parsed query against the provided fields, and combine
* them using Dismax.
*/
-public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQueryBuilder> {
+public final class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQueryBuilder> {
public static final String NAME = "query_string";
@@ -153,7 +153,6 @@ public QueryStringQueryBuilder(String queryString) {
/**
* Read from a stream.
*/
- @SuppressWarnings("this-escape")
public QueryStringQueryBuilder(StreamInput in) throws IOException {
super(in);
queryString = in.readString();
diff --git a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java
index 8186c9c2d9a0..c4806dbd3a0a 100644
--- a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java
+++ b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java
@@ -98,6 +98,8 @@ public class SearchExecutionContext extends QueryRewriteContext {
private final Map namedQueries = new HashMap<>();
private NestedScope nestedScope;
+ private QueryBuilder aliasFilter;
+
/**
* Build a {@linkplain SearchExecutionContext}.
*/
@@ -228,6 +230,15 @@ private void reset() {
this.nestedScope = new NestedScope();
}
+ // Set alias filter, so it can be applied for queries that need it (e.g. knn query)
+ public void setAliasFilter(QueryBuilder aliasFilter) {
+ this.aliasFilter = aliasFilter;
+ }
+
+ public QueryBuilder getAliasFilter() {
+ return aliasFilter;
+ }
+
/**
* The similarity to use in searches, which takes into account per-field configuration.
*/
diff --git a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java
index 4f6ba803eb7a..b2067549fab6 100644
--- a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java
@@ -69,7 +69,7 @@
* "https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-simple-query-string-query.html"
* > online documentation.
*/
-public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQueryStringBuilder> {
+public final class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQueryStringBuilder> {
/** Default for using lenient query parsing.*/
public static final boolean DEFAULT_LENIENT = false;
@@ -142,7 +142,6 @@ public SimpleQueryStringBuilder(String queryText) {
/**
* Read from a stream.
*/
- @SuppressWarnings("this-escape")
public SimpleQueryStringBuilder(StreamInput in) throws IOException {
super(in);
queryText = in.readString();
diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShardRecoveryException.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShardRecoveryException.java
index d03c1c2db06e..38ad67da2417 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/IndexShardRecoveryException.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShardRecoveryException.java
@@ -13,8 +13,7 @@
import java.io.IOException;
-public class IndexShardRecoveryException extends ElasticsearchException {
- @SuppressWarnings("this-escape")
+public final class IndexShardRecoveryException extends ElasticsearchException {
public IndexShardRecoveryException(ShardId shardId, String msg, Throwable cause) {
super(msg, cause);
setShard(shardId);
diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardNotFoundException.java b/server/src/main/java/org/elasticsearch/index/shard/ShardNotFoundException.java
index fa66d8fe86ad..8c9ab0e30b28 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/ShardNotFoundException.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/ShardNotFoundException.java
@@ -13,7 +13,7 @@
import java.io.IOException;
-public class ShardNotFoundException extends ResourceNotFoundException {
+public final class ShardNotFoundException extends ResourceNotFoundException {
public ShardNotFoundException(ShardId shardId) {
this(shardId, null);
}
@@ -26,7 +26,6 @@ public ShardNotFoundException(ShardId shardId, String msg, Object... args) {
this(shardId, msg, null, args);
}
- @SuppressWarnings("this-escape")
public ShardNotFoundException(ShardId shardId, String msg, Throwable ex, Object... args) {
super(msg, ex, args);
setShard(shardId);
diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java b/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java
index 843f049d30e4..140c4684d1a7 100644
--- a/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java
+++ b/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java
@@ -119,9 +119,8 @@ public synchronized Copy moveToStarted(
this.totalFileCount = totalFileCount;
this.incrementalSize = incrementalSize;
this.totalSize = totalSize;
- } else if (isAborted()) {
- throw new AbortedSnapshotException();
} else {
+ ensureNotAborted();
assert false : "Should not try to move stage [" + stage.get() + "] to [STARTED]";
throw new IllegalStateException(
"Unable to move the shard snapshot status to [STARTED]: " + "expecting [INIT] but got [" + stage.get() + "]"
@@ -195,12 +194,8 @@ public ShardSnapshotResult getShardSnapshotResult() {
return shardSnapshotResult.get();
}
- public boolean isAborted() {
- return stage.get() == Stage.ABORTED;
- }
-
public void ensureNotAborted() {
- if (isAborted()) {
+ if (stage.get() == Stage.ABORTED) {
throw new AbortedSnapshotException();
}
}
@@ -243,15 +238,15 @@ public static IndexShardSnapshotStatus newInitializing(ShardGeneration generatio
return new IndexShardSnapshotStatus(Stage.INIT, 0L, 0L, 0, 0, 0, 0, 0, 0, null, generation);
}
- public static IndexShardSnapshotStatus newFailed(final String failure) {
+ public static IndexShardSnapshotStatus.Copy newFailed(final String failure) {
assert failure != null : "expecting non null failure for a failed IndexShardSnapshotStatus";
if (failure == null) {
throw new IllegalArgumentException("A failure description is required for a failed IndexShardSnapshotStatus");
}
- return new IndexShardSnapshotStatus(Stage.FAILURE, 0L, 0L, 0, 0, 0, 0, 0, 0, failure, null);
+ return new IndexShardSnapshotStatus(Stage.FAILURE, 0L, 0L, 0, 0, 0, 0, 0, 0, failure, null).asCopy();
}
- public static IndexShardSnapshotStatus newDone(
+ public static IndexShardSnapshotStatus.Copy newDone(
final long startTime,
final long totalTime,
final int incrementalFileCount,
@@ -273,7 +268,7 @@ public static IndexShardSnapshotStatus newDone(
incrementalSize,
null,
generation
- );
+ ).asCopy();
}
/**
diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java
index 5b67f58f06a3..2a8fe96151c1 100644
--- a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java
+++ b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java
@@ -38,7 +38,7 @@ public class BlobStoreIndexShardSnapshot implements ToXContentFragment {
/**
* Information about snapshotted file
*/
- public static class FileInfo implements Writeable {
+ public static final class FileInfo implements Writeable {
public static final String SERIALIZE_WRITER_UUID = "serialize_writer_uuid";
private final String name;
@@ -55,7 +55,6 @@ public static class FileInfo implements Writeable {
* @param metadata the files meta data
* @param partSize size of the single chunk
*/
- @SuppressWarnings("this-escape")
public FileInfo(String name, StoreFileMetadata metadata, @Nullable ByteSizeValue partSize) {
this.name = Objects.requireNonNull(name);
this.metadata = metadata;
diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogException.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogException.java
index 000b36b299fd..78b9b6424ece 100644
--- a/server/src/main/java/org/elasticsearch/index/translog/TranslogException.java
+++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogException.java
@@ -14,13 +14,12 @@
import java.io.IOException;
-public class TranslogException extends ElasticsearchException {
+public final class TranslogException extends ElasticsearchException {
public TranslogException(ShardId shardId, String msg) {
this(shardId, msg, null);
}
- @SuppressWarnings("this-escape")
public TranslogException(ShardId shardId, String msg, Throwable cause) {
super(msg, cause);
setShard(shardId);
diff --git a/server/src/main/java/org/elasticsearch/indices/AliasFilterParsingException.java b/server/src/main/java/org/elasticsearch/indices/AliasFilterParsingException.java
index 371444b6a72d..744f8c2ed303 100644
--- a/server/src/main/java/org/elasticsearch/indices/AliasFilterParsingException.java
+++ b/server/src/main/java/org/elasticsearch/indices/AliasFilterParsingException.java
@@ -14,9 +14,8 @@
import java.io.IOException;
-public class AliasFilterParsingException extends ElasticsearchException {
+public final class AliasFilterParsingException extends ElasticsearchException {
- @SuppressWarnings("this-escape")
public AliasFilterParsingException(Index index, String name, String desc, Throwable ex) {
super("[" + name + "], " + desc, ex);
setIndex(index);
diff --git a/server/src/main/java/org/elasticsearch/indices/IndexClosedException.java b/server/src/main/java/org/elasticsearch/indices/IndexClosedException.java
index 4c4035dbc635..f64e6758130b 100644
--- a/server/src/main/java/org/elasticsearch/indices/IndexClosedException.java
+++ b/server/src/main/java/org/elasticsearch/indices/IndexClosedException.java
@@ -18,9 +18,8 @@
/**
* Exception indicating that one or more requested indices are closed.
*/
-public class IndexClosedException extends ElasticsearchException {
+public final class IndexClosedException extends ElasticsearchException {
- @SuppressWarnings("this-escape")
public IndexClosedException(Index index) {
super("closed");
setIndex(index);
diff --git a/server/src/main/java/org/elasticsearch/indices/IndexCreationException.java b/server/src/main/java/org/elasticsearch/indices/IndexCreationException.java
index a38bcc8ae02d..4558c5a7bc2b 100644
--- a/server/src/main/java/org/elasticsearch/indices/IndexCreationException.java
+++ b/server/src/main/java/org/elasticsearch/indices/IndexCreationException.java
@@ -14,9 +14,8 @@
import java.io.IOException;
-public class IndexCreationException extends ElasticsearchException implements ElasticsearchWrapperException {
+public final class IndexCreationException extends ElasticsearchException implements ElasticsearchWrapperException {
- @SuppressWarnings("this-escape")
public IndexCreationException(String index, Throwable cause) {
super("failed to create index [{}]", cause, index);
setIndex(index);
diff --git a/server/src/main/java/org/elasticsearch/indices/IndexPrimaryShardNotAllocatedException.java b/server/src/main/java/org/elasticsearch/indices/IndexPrimaryShardNotAllocatedException.java
index 819378bae0fe..1591ae4428fe 100644
--- a/server/src/main/java/org/elasticsearch/indices/IndexPrimaryShardNotAllocatedException.java
+++ b/server/src/main/java/org/elasticsearch/indices/IndexPrimaryShardNotAllocatedException.java
@@ -19,12 +19,11 @@
* Thrown when some action cannot be performed because the primary shard of
* some shard group in an index has not been allocated post api action.
*/
-public class IndexPrimaryShardNotAllocatedException extends ElasticsearchException {
+public final class IndexPrimaryShardNotAllocatedException extends ElasticsearchException {
public IndexPrimaryShardNotAllocatedException(StreamInput in) throws IOException {
super(in);
}
- @SuppressWarnings("this-escape")
public IndexPrimaryShardNotAllocatedException(Index index) {
super("primary not allocated post api");
setIndex(index);
diff --git a/server/src/main/java/org/elasticsearch/indices/InvalidAliasNameException.java b/server/src/main/java/org/elasticsearch/indices/InvalidAliasNameException.java
index 4b06757652f7..b9d25b83d24e 100644
--- a/server/src/main/java/org/elasticsearch/indices/InvalidAliasNameException.java
+++ b/server/src/main/java/org/elasticsearch/indices/InvalidAliasNameException.java
@@ -15,9 +15,8 @@
import java.io.IOException;
-public class InvalidAliasNameException extends ElasticsearchException {
+public final class InvalidAliasNameException extends ElasticsearchException {
- @SuppressWarnings("this-escape")
public InvalidAliasNameException(Index index, String name, String desc) {
super("Invalid alias name [{}], {}", name, desc);
setIndex(index);
diff --git a/server/src/main/java/org/elasticsearch/indices/InvalidIndexNameException.java b/server/src/main/java/org/elasticsearch/indices/InvalidIndexNameException.java
index fec791364cf9..7a1e1c8cede4 100644
--- a/server/src/main/java/org/elasticsearch/indices/InvalidIndexNameException.java
+++ b/server/src/main/java/org/elasticsearch/indices/InvalidIndexNameException.java
@@ -15,15 +15,13 @@
import java.io.IOException;
-public class InvalidIndexNameException extends ElasticsearchException {
+public final class InvalidIndexNameException extends ElasticsearchException {
- @SuppressWarnings("this-escape")
public InvalidIndexNameException(String name, String desc) {
super("Invalid index name [" + name + "], " + desc);
setIndex(name);
}
- @SuppressWarnings("this-escape")
public InvalidIndexNameException(Index index, String name, String desc) {
super("Invalid index name [" + name + "], " + desc);
setIndex(index);
diff --git a/server/src/main/java/org/elasticsearch/indices/TypeMissingException.java b/server/src/main/java/org/elasticsearch/indices/TypeMissingException.java
index c53b72cbe380..ab4c43397b12 100644
--- a/server/src/main/java/org/elasticsearch/indices/TypeMissingException.java
+++ b/server/src/main/java/org/elasticsearch/indices/TypeMissingException.java
@@ -16,21 +16,18 @@
import java.io.IOException;
import java.util.Arrays;
-public class TypeMissingException extends ElasticsearchException {
+public final class TypeMissingException extends ElasticsearchException {
- @SuppressWarnings("this-escape")
public TypeMissingException(Index index, String... types) {
super("type" + Arrays.toString(types) + " missing");
setIndex(index);
}
- @SuppressWarnings("this-escape")
public TypeMissingException(Index index, Throwable cause, String... types) {
super("type" + Arrays.toString(types) + " missing", cause);
setIndex(index);
}
- @SuppressWarnings("this-escape")
public TypeMissingException(String index, String... types) {
super("type[" + Arrays.toString(types) + "] missing");
setIndex(index);
diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java b/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java
index 36a89f4c0d40..d692c331927d 100644
--- a/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java
+++ b/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java
@@ -65,7 +65,7 @@
*
* @see org.elasticsearch.index.analysis.HunspellTokenFilterFactory
*/
-public class HunspellService {
+public final class HunspellService {
private static final Logger logger = LogManager.getLogger(HunspellService.class);
@@ -89,7 +89,6 @@ public class HunspellService {
private final Path hunspellDir;
private final Function loadingFunction;
- @SuppressWarnings("this-escape")
public HunspellService(final Settings settings, final Environment env, final Map knownDictionaries)
throws IOException {
this.knownDictionaries = Collections.unmodifiableMap(knownDictionaries);
diff --git a/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java b/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java
index 76de0d43b7f2..c77cf15a2b2e 100644
--- a/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java
+++ b/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java
@@ -39,7 +39,7 @@
import java.util.List;
import java.util.function.ToLongBiFunction;
-public class IndicesFieldDataCache implements RemovalListener, Releasable {
+public final class IndicesFieldDataCache implements RemovalListener, Releasable {
private static final Logger logger = LogManager.getLogger(IndicesFieldDataCache.class);
@@ -51,7 +51,6 @@ public class IndicesFieldDataCache implements RemovalListener cache;
- @SuppressWarnings("this-escape")
public IndicesFieldDataCache(Settings settings, IndexFieldDataCache.Listener indicesFieldDataCacheListener) {
this.indicesFieldDataCacheListener = indicesFieldDataCacheListener;
final long sizeInBytes = INDICES_FIELDDATA_CACHE_SIZE_KEY.get(settings).getBytes();
diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverFilesRecoveryException.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverFilesRecoveryException.java
index 1a40f7526240..87f491a598d5 100644
--- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverFilesRecoveryException.java
+++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverFilesRecoveryException.java
@@ -18,13 +18,12 @@
import java.io.IOException;
import java.util.Objects;
-public class RecoverFilesRecoveryException extends ElasticsearchException implements ElasticsearchWrapperException {
+public final class RecoverFilesRecoveryException extends ElasticsearchException implements ElasticsearchWrapperException {
private final int numberOfFiles;
private final ByteSizeValue totalFilesSize;
- @SuppressWarnings("this-escape")
public RecoverFilesRecoveryException(ShardId shardId, int numberOfFiles, ByteSizeValue totalFilesSize, Throwable cause) {
super("Failed to transfer [{}] files with total size of [{}]", cause, numberOfFiles, totalFilesSize);
Objects.requireNonNull(totalFilesSize, "totalFilesSize must not be null");
diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryCommitTooNewException.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryCommitTooNewException.java
index d89a429dc853..c42f88c9b843 100644
--- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryCommitTooNewException.java
+++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryCommitTooNewException.java
@@ -14,8 +14,7 @@
import java.io.IOException;
-public class RecoveryCommitTooNewException extends ElasticsearchException {
- @SuppressWarnings("this-escape")
+public final class RecoveryCommitTooNewException extends ElasticsearchException {
public RecoveryCommitTooNewException(ShardId shardId, String message) {
super(message);
setShard(shardId);
diff --git a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java
index 6c32ebd491ed..d631c7a11d10 100644
--- a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java
+++ b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java
@@ -64,7 +64,7 @@
import static org.elasticsearch.core.Strings.format;
-public class IndicesStore implements ClusterStateListener, Closeable {
+public final class IndicesStore implements ClusterStateListener, Closeable {
private static final Logger logger = LogManager.getLogger(IndicesStore.class);
@@ -88,7 +88,6 @@ public class IndicesStore implements ClusterStateListener, Closeable {
private final TimeValue deleteShardTimeout;
- @SuppressWarnings("this-escape")
@Inject
public IndicesStore(
Settings settings,
diff --git a/server/src/main/java/org/elasticsearch/inference/EmptyTaskSettings.java b/server/src/main/java/org/elasticsearch/inference/EmptyTaskSettings.java
new file mode 100644
index 000000000000..24bfef4ec313
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/inference/EmptyTaskSettings.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.inference;
+
+import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+/**
+ * This class defines an empty task settings object. This is useful for services that do not have any task settings.
+ */
+public record EmptyTaskSettings() implements TaskSettings {
+    public static final String NAME = "empty_task_settings";
+
+    /** Shared singleton; the record is stateless, so a single immutable instance suffices. */
+    public static final EmptyTaskSettings INSTANCE = new EmptyTaskSettings();
+
+    public EmptyTaskSettings(StreamInput in) {
+        this();
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.endObject();
+        return builder;
+    }
+
+    @Override
+    public String getWriteableName() {
+        return NAME;
+    }
+
+    @Override
+    public TransportVersion getMinimalSupportedVersion() {
+        return TransportVersions.ML_INFERENCE_TASK_SETTINGS_OPTIONAL_ADDED;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {}
+}
diff --git a/server/src/main/java/org/elasticsearch/inference/InferenceService.java b/server/src/main/java/org/elasticsearch/inference/InferenceService.java
index 82ce13e591b6..2d7ee9f210e6 100644
--- a/server/src/main/java/org/elasticsearch/inference/InferenceService.java
+++ b/server/src/main/java/org/elasticsearch/inference/InferenceService.java
@@ -8,6 +8,7 @@
package org.elasticsearch.inference;
+import org.elasticsearch.TransportVersion;
import org.elasticsearch.action.ActionListener;
import java.io.Closeable;
@@ -76,4 +77,10 @@ public interface InferenceService extends Closeable {
default boolean isInClusterService() {
return false;
}
+
+ /**
+ * Defines the version required across all clusters to use this service
+ * @return {@link TransportVersion} specifying the version
+ */
+ TransportVersion getMinimalSupportedVersion();
}
diff --git a/server/src/main/java/org/elasticsearch/inference/ModelConfigurations.java b/server/src/main/java/org/elasticsearch/inference/ModelConfigurations.java
index a8ae380bd3ba..cdccca7eb0c0 100644
--- a/server/src/main/java/org/elasticsearch/inference/ModelConfigurations.java
+++ b/server/src/main/java/org/elasticsearch/inference/ModelConfigurations.java
@@ -33,6 +33,13 @@ public class ModelConfigurations implements ToXContentObject, VersionedNamedWrit
private final ServiceSettings serviceSettings;
private final TaskSettings taskSettings;
+ /**
+ * Allows no task settings to be defined. This will default to the {@link EmptyTaskSettings} object.
+ */
+ public ModelConfigurations(String modelId, TaskType taskType, String service, ServiceSettings serviceSettings) {
+ this(modelId, taskType, service, serviceSettings, EmptyTaskSettings.INSTANCE);
+ }
+
public ModelConfigurations(
String modelId,
TaskType taskType,
@@ -40,11 +47,11 @@ public ModelConfigurations(
ServiceSettings serviceSettings,
TaskSettings taskSettings
) {
- this.modelId = modelId;
- this.taskType = taskType;
- this.service = service;
- this.serviceSettings = serviceSettings;
- this.taskSettings = taskSettings;
+ this.modelId = Objects.requireNonNull(modelId);
+ this.taskType = Objects.requireNonNull(taskType);
+ this.service = Objects.requireNonNull(service);
+ this.serviceSettings = Objects.requireNonNull(serviceSettings);
+ this.taskSettings = Objects.requireNonNull(taskSettings);
}
public ModelConfigurations(StreamInput in) throws IOException {
diff --git a/server/src/main/java/org/elasticsearch/lucene/analysis/miscellaneous/DeDuplicatingTokenFilter.java b/server/src/main/java/org/elasticsearch/lucene/analysis/miscellaneous/DeDuplicatingTokenFilter.java
index e51470edf523..8b6b87ea2e73 100644
--- a/server/src/main/java/org/elasticsearch/lucene/analysis/miscellaneous/DeDuplicatingTokenFilter.java
+++ b/server/src/main/java/org/elasticsearch/lucene/analysis/miscellaneous/DeDuplicatingTokenFilter.java
@@ -36,8 +36,7 @@
* results or are output (the {@link DuplicateSequenceAttribute} attribute can
* be used to inspect the number of prior sightings when emitDuplicates is true)
*/
-public class DeDuplicatingTokenFilter extends FilteringTokenFilter {
- @SuppressWarnings("this-escape")
+public final class DeDuplicatingTokenFilter extends FilteringTokenFilter {
private final DuplicateSequenceAttribute seqAtt = addAttribute(DuplicateSequenceAttribute.class);
private final boolean emitDuplicates;
static final MurmurHash3.Hash128 seed = new MurmurHash3.Hash128();
diff --git a/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomUnifiedHighlighter.java b/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomUnifiedHighlighter.java
index 838dff420777..5c1381f73001 100644
--- a/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomUnifiedHighlighter.java
+++ b/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomUnifiedHighlighter.java
@@ -32,6 +32,7 @@
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.search.ESToParentBlockJoinQuery;
import org.elasticsearch.search.runtime.AbstractScriptFieldQuery;
+import org.elasticsearch.search.vectors.KnnScoreDocQuery;
import java.io.IOException;
import java.text.BreakIterator;
@@ -50,7 +51,7 @@
* value as a discrete passage for highlighting (unless the whole content needs to be highlighted).
* Supports both returning empty snippets and non highlighted snippets when no highlighting can be performed.
*/
-public class CustomUnifiedHighlighter extends UnifiedHighlighter {
+public final class CustomUnifiedHighlighter extends UnifiedHighlighter {
public static final char MULTIVAL_SEP_CHAR = (char) 0;
private static final Snippet[] EMPTY_SNIPPET = new Snippet[0];
@@ -79,7 +80,6 @@ public class CustomUnifiedHighlighter extends UnifiedHighlighter {
* offset source for it because it'd be super slow
* @param weightMatchesEnabled whether the {@link HighlightFlag#WEIGHT_MATCHES} should be enabled
*/
- @SuppressWarnings("this-escape")
public CustomUnifiedHighlighter(
Builder builder,
OffsetSource offsetSource,
@@ -250,6 +250,13 @@ public void visitLeaf(Query leafQuery) {
if (leafQuery.getClass().getSimpleName().equals("LateParsingQuery")) {
hasUnknownLeaf[0] = true;
}
+ /**
+ * KnnScoreDocQuery requires the same reader that built the docs
+ * When using {@link HighlightFlag#WEIGHT_MATCHES} different readers are used and isn't supported by this query
+ */
+ if (leafQuery instanceof KnnScoreDocQuery) {
+ hasUnknownLeaf[0] = true;
+ }
super.visitLeaf(query);
}
diff --git a/server/src/main/java/org/elasticsearch/monitor/fs/FsHealthService.java b/server/src/main/java/org/elasticsearch/monitor/fs/FsHealthService.java
index 16d44b572cf8..de26d23a149e 100644
--- a/server/src/main/java/org/elasticsearch/monitor/fs/FsHealthService.java
+++ b/server/src/main/java/org/elasticsearch/monitor/fs/FsHealthService.java
@@ -42,7 +42,7 @@
/**
* Runs periodically and attempts to create a temp file to see if the filesystem is writable. If not then it marks the path as unhealthy.
*/
-public class FsHealthService extends AbstractLifecycleComponent implements NodeHealthService {
+public final class FsHealthService extends AbstractLifecycleComponent implements NodeHealthService {
private static final Logger logger = LogManager.getLogger(FsHealthService.class);
@@ -82,7 +82,6 @@ public class FsHealthService extends AbstractLifecycleComponent implements NodeH
Setting.Property.Dynamic
);
- @SuppressWarnings("this-escape")
public FsHealthService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool, NodeEnvironment nodeEnv) {
this.threadPool = threadPool;
this.enabled = ENABLED_SETTING.get(settings);
diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java
index 45ad99672082..20c852993586 100644
--- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java
+++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java
@@ -33,6 +33,7 @@
import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.coordination.CoordinationDiagnosticsService;
import org.elasticsearch.cluster.coordination.Coordinator;
import org.elasticsearch.cluster.coordination.MasterHistoryService;
@@ -1220,7 +1221,10 @@ record PluginServiceInstances(
this.xContentRegistry = xContentRegistry;
logger.debug("initializing HTTP handlers ...");
- actionModule.initRestHandlers(() -> clusterService.state().nodesIfRecovered());
+ actionModule.initRestHandlers(() -> clusterService.state().nodesIfRecovered(), f -> {
+ ClusterState state = clusterService.state();
+ return state.clusterRecovered() && featureService.clusterHasFeature(state, f);
+ });
logger.info("initialized");
}
diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java
index 38d67efa734b..ba7b4bb51d9c 100644
--- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java
+++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java
@@ -46,7 +46,7 @@
/**
* Component that runs only on the master node and is responsible for assigning running tasks to nodes
*/
-public class PersistentTasksClusterService implements ClusterStateListener, Closeable {
+public final class PersistentTasksClusterService implements ClusterStateListener, Closeable {
public static final Setting CLUSTER_TASKS_ALLOCATION_RECHECK_INTERVAL_SETTING = Setting.timeSetting(
"cluster.persistent_tasks.allocation.recheck_interval",
@@ -65,7 +65,6 @@ public class PersistentTasksClusterService implements ClusterStateListener, Clos
private final PeriodicRechecker periodicRechecker;
private final AtomicBoolean reassigningTasks = new AtomicBoolean(false);
- @SuppressWarnings("this-escape")
public PersistentTasksClusterService(
Settings settings,
PersistentTasksExecutorRegistry registry,
diff --git a/server/src/main/java/org/elasticsearch/persistent/decider/EnableAssignmentDecider.java b/server/src/main/java/org/elasticsearch/persistent/decider/EnableAssignmentDecider.java
index ebabec42ef11..ae600dfda39a 100644
--- a/server/src/main/java/org/elasticsearch/persistent/decider/EnableAssignmentDecider.java
+++ b/server/src/main/java/org/elasticsearch/persistent/decider/EnableAssignmentDecider.java
@@ -28,7 +28,7 @@
*
* @see Allocation
*/
-public class EnableAssignmentDecider {
+public final class EnableAssignmentDecider {
public static final Setting CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING = new Setting<>(
"cluster.persistent_tasks.allocation.enable",
@@ -41,7 +41,6 @@ public class EnableAssignmentDecider {
private volatile Allocation enableAssignment;
- @SuppressWarnings("this-escape")
public EnableAssignmentDecider(final Settings settings, final ClusterSettings clusterSettings) {
this.enableAssignment = CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.get(settings);
clusterSettings.addSettingsUpdateConsumer(CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING, this::setEnableAssignment);
diff --git a/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java b/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java
index b4dd0a2f37b3..c88bbcfa91b9 100644
--- a/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java
+++ b/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java
@@ -129,7 +129,7 @@ public void restoreShard(
}
@Override
- public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, IndexId indexId, ShardId shardId) {
+ public IndexShardSnapshotStatus.Copy getShardSnapshotStatus(SnapshotId snapshotId, IndexId indexId, ShardId shardId) {
return in.getShardSnapshotStatus(snapshotId, indexId, shardId);
}
diff --git a/server/src/main/java/org/elasticsearch/repositories/InvalidRepository.java b/server/src/main/java/org/elasticsearch/repositories/InvalidRepository.java
index ad0f956a1664..6bd967d84c89 100644
--- a/server/src/main/java/org/elasticsearch/repositories/InvalidRepository.java
+++ b/server/src/main/java/org/elasticsearch/repositories/InvalidRepository.java
@@ -137,7 +137,7 @@ public void restoreShard(
}
@Override
- public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, IndexId indexId, ShardId shardId) {
+ public IndexShardSnapshotStatus.Copy getShardSnapshotStatus(SnapshotId snapshotId, IndexId indexId, ShardId shardId) {
throw createCreationException();
}
diff --git a/server/src/main/java/org/elasticsearch/repositories/Repository.java b/server/src/main/java/org/elasticsearch/repositories/Repository.java
index 1fd01631818b..5782dedf3cfb 100644
--- a/server/src/main/java/org/elasticsearch/repositories/Repository.java
+++ b/server/src/main/java/org/elasticsearch/repositories/Repository.java
@@ -208,7 +208,7 @@ default RepositoryStats stats() {
* Creates a snapshot of the shard referenced by the given {@link SnapshotShardContext}.
*
* As snapshot process progresses, implementation of this method should update {@link IndexShardSnapshotStatus} object returned by
- * {@link SnapshotShardContext#status()} and check its {@link IndexShardSnapshotStatus#isAborted()} to see if the snapshot process
+ * {@link SnapshotShardContext#status()} and call {@link IndexShardSnapshotStatus#ensureNotAborted()} to see if the snapshot process
* should be aborted.
*
* @param snapshotShardContext snapshot shard context that must be completed via {@link SnapshotShardContext#onResponse} or
@@ -244,7 +244,7 @@ void restoreShard(
* @param shardId shard id
* @return snapshot status
*/
- IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, IndexId indexId, ShardId shardId);
+ IndexShardSnapshotStatus.Copy getShardSnapshotStatus(SnapshotId snapshotId, IndexId indexId, ShardId shardId);
/**
* Check if this instances {@link Settings} can be changed to the provided updated settings without recreating the repository.
diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java b/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java
index 1c5ea5a2b001..17ac4ef38f1b 100644
--- a/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java
+++ b/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java
@@ -1229,6 +1229,22 @@ public int hashCode() {
return Objects.hash(snapshotState, version, startTimeMillis, endTimeMillis, slmPolicy);
}
+ @Override
+ public String toString() {
+ return "SnapshotDetails{"
+ + "snapshotState="
+ + snapshotState
+ + ", version="
+ + version
+ + ", startTimeMillis="
+ + startTimeMillis
+ + ", endTimeMillis="
+ + endTimeMillis
+ + ", slmPolicy='"
+ + slmPolicy
+ + "'}";
+ }
+
public static SnapshotDetails fromSnapshotInfo(SnapshotInfo snapshotInfo) {
return new SnapshotDetails(
snapshotInfo.state(),
diff --git a/server/src/main/java/org/elasticsearch/repositories/SnapshotIndexCommit.java b/server/src/main/java/org/elasticsearch/repositories/SnapshotIndexCommit.java
index 43594aa6047e..b041f51afa6d 100644
--- a/server/src/main/java/org/elasticsearch/repositories/SnapshotIndexCommit.java
+++ b/server/src/main/java/org/elasticsearch/repositories/SnapshotIndexCommit.java
@@ -19,13 +19,12 @@
* A (closeable) {@link IndexCommit} plus ref-counting to keep track of active users, and with the facility to drop the "main" initial ref
* early if the shard snapshot is aborted.
*/
-public class SnapshotIndexCommit extends AbstractRefCounted {
+public final class SnapshotIndexCommit extends AbstractRefCounted {
private final Engine.IndexCommitRef commitRef;
private final Runnable releaseInitialRef;
private final SubscribableListener completionListeners = new SubscribableListener<>();
- @SuppressWarnings("this-escape")
public SnapshotIndexCommit(Engine.IndexCommitRef commitRef) {
this.commitRef = commitRef;
this.releaseInitialRef = new RunOnce(this::decRef);
diff --git a/server/src/main/java/org/elasticsearch/repositories/UnknownTypeRepository.java b/server/src/main/java/org/elasticsearch/repositories/UnknownTypeRepository.java
index b9da0b1663c5..30f167d8c5cf 100644
--- a/server/src/main/java/org/elasticsearch/repositories/UnknownTypeRepository.java
+++ b/server/src/main/java/org/elasticsearch/repositories/UnknownTypeRepository.java
@@ -135,7 +135,7 @@ public void restoreShard(
}
@Override
- public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, IndexId indexId, ShardId shardId) {
+ public IndexShardSnapshotStatus.Copy getShardSnapshotStatus(SnapshotId snapshotId, IndexId indexId, ShardId shardId) {
throw createUnknownTypeException();
}
diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
index 743e978181f3..4167717e0900 100644
--- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
+++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
@@ -3040,10 +3040,7 @@ private void doSnapshotShard(SnapshotShardContext context) {
}
}
for (String fileName : fileNames) {
- if (snapshotStatus.isAborted()) {
- logger.debug("[{}] [{}] Aborted on the file [{}], exiting", shardId, snapshotId, fileName);
- throw new AbortedSnapshotException();
- }
+ ensureNotAborted(shardId, snapshotId, snapshotStatus, fileName);
logger.trace("[{}] [{}] Processing [{}]", shardId, snapshotId, fileName);
final StoreFileMetadata md = metadataFromStore.get(fileName);
@@ -3245,6 +3242,16 @@ private void doSnapshotShard(SnapshotShardContext context) {
}
}
+ private static void ensureNotAborted(ShardId shardId, SnapshotId snapshotId, IndexShardSnapshotStatus snapshotStatus, String fileName) {
+ try {
+ snapshotStatus.ensureNotAborted();
+ } catch (Exception e) {
+ logger.debug("[{}] [{}] {} on the file [{}], exiting", shardId, snapshotId, e.getMessage(), fileName);
+ assert e instanceof AbortedSnapshotException : e;
+ throw e;
+ }
+ }
+
protected void snapshotFiles(
SnapshotShardContext context,
BlockingQueue filesToSnapshot,
@@ -3273,7 +3280,12 @@ private static boolean assertFileContentsMatchHash(
store.decRef();
}
} else {
- assert snapshotStatus.isAborted() : "if the store is already closed we must have been aborted";
+ try {
+ snapshotStatus.ensureNotAborted();
+ assert false : "if the store is already closed we must have been aborted";
+ } catch (Exception e) {
+ assert e instanceof AbortedSnapshotException : e;
+ }
}
return true;
}
@@ -3497,7 +3509,7 @@ public InputStream maybeRateLimitSnapshots(InputStream stream, RateLimitingInput
}
@Override
- public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, IndexId indexId, ShardId shardId) {
+ public IndexShardSnapshotStatus.Copy getShardSnapshotStatus(SnapshotId snapshotId, IndexId indexId, ShardId shardId) {
BlobStoreIndexShardSnapshot snapshot = loadShardSnapshot(shardContainer(indexId, shardId), snapshotId);
return IndexShardSnapshotStatus.newDone(
snapshot.startTime(),
@@ -3506,8 +3518,8 @@ public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, In
snapshot.totalFileCount(),
snapshot.incrementalSize(),
snapshot.totalSize(),
- null
- ); // Not adding a real generation here as it doesn't matter to callers
+ null // Not adding a real generation here as it doesn't matter to callers
+ );
}
@Override
@@ -3712,10 +3724,7 @@ public int read(byte[] b, int off, int len) throws IOException {
}
private void checkAborted() {
- if (snapshotStatus.isAborted()) {
- logger.debug("[{}] [{}] Aborted on the file [{}], exiting", shardId, snapshotId, fileInfo.physicalName());
- throw new AbortedSnapshotException();
- }
+ ensureNotAborted(shardId, snapshotId, snapshotStatus, fileInfo.physicalName());
}
};
final String partName = fileInfo.partName(i);
diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java
index e1f6b4e80977..2d13af0248a7 100644
--- a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java
+++ b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java
@@ -11,22 +11,17 @@
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.support.PlainActionFuture;
-import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.metadata.ReservedStateMetadata;
-import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.common.file.AbstractFileWatchingService;
import org.elasticsearch.env.Environment;
import org.elasticsearch.xcontent.XContentParserConfiguration;
import java.io.BufferedInputStream;
import java.io.IOException;
import java.nio.file.Files;
-import java.nio.file.attribute.FileTime;
-import java.time.Instant;
import java.util.concurrent.ExecutionException;
import static org.elasticsearch.xcontent.XContentType.JSON;
@@ -42,16 +37,14 @@
* the service as a listener to cluster state changes, so that we can enable the file watcher thread when this
* node becomes a master node.
*/
-public class FileSettingsService extends AbstractFileWatchingService implements ClusterStateListener {
+public class FileSettingsService extends MasterNodeFileWatchingService implements ClusterStateListener {
private static final Logger logger = LogManager.getLogger(FileSettingsService.class);
public static final String SETTINGS_FILE_NAME = "settings.json";
public static final String NAMESPACE = "file_settings";
public static final String OPERATOR_DIRECTORY = "operator";
- private final ClusterService clusterService;
private final ReservedClusterStateService stateService;
- private volatile boolean active = false;
/**
* Constructs the {@link FileSettingsService}
@@ -61,70 +54,10 @@ public class FileSettingsService extends AbstractFileWatchingService implements
* @param environment we need the environment to pull the location of the config and operator directories
*/
public FileSettingsService(ClusterService clusterService, ReservedClusterStateService stateService, Environment environment) {
- super(environment.configFile().toAbsolutePath().resolve(OPERATOR_DIRECTORY).resolve(SETTINGS_FILE_NAME));
- this.clusterService = clusterService;
+ super(clusterService, environment.configFile().toAbsolutePath().resolve(OPERATOR_DIRECTORY).resolve(SETTINGS_FILE_NAME));
this.stateService = stateService;
}
- @Override
- protected void doStart() {
- // We start the file watcher when we know we are master from a cluster state change notification.
- // We need the additional active flag, since cluster state can change after we've shutdown the service
- // causing the watcher to start again.
- this.active = Files.exists(watchedFileDir().getParent());
- if (active == false) {
- // we don't have a config directory, we can't possibly launch the file settings service
- return;
- }
- if (DiscoveryNode.isMasterNode(clusterService.getSettings())) {
- clusterService.addListener(this);
- }
- }
-
- @Override
- protected void doStop() {
- this.active = false;
- super.doStop();
- }
-
- @Override
- public final void clusterChanged(ClusterChangedEvent event) {
- ClusterState clusterState = event.state();
- if (clusterState.nodes().isLocalNodeElectedMaster()) {
- synchronized (this) {
- if (watching() || active == false) {
- refreshExistingFileStateIfNeeded(clusterState);
- return;
- }
- startWatcher();
- }
- } else if (event.previousState().nodes().isLocalNodeElectedMaster()) {
- stopWatcher();
- }
- }
-
- /**
- * 'Touches' the settings file so the file watcher will re-processes it.
- *
- * The file processing is asynchronous, the cluster state or the file must be already updated such that
- * the version information in the file is newer than what's already saved as processed in the
- * cluster state.
- *
- * For snapshot restores we first must restore the snapshot and then force a refresh, since the cluster state
- * metadata version must be reset to 0 and saved in the cluster state.
- */
- private void refreshExistingFileStateIfNeeded(ClusterState clusterState) {
- if (watching()) {
- if (shouldRefreshFileState(clusterState) && Files.exists(watchedFile())) {
- try {
- Files.setLastModifiedTime(watchedFile(), FileTime.from(Instant.now()));
- } catch (IOException e) {
- logger.warn("encountered I/O error trying to update file settings timestamp", e);
- }
- }
- }
- }
-
/**
* Used by snapshot restore service {@link org.elasticsearch.snapshots.RestoreService} to prepare the reserved
* state of the snapshot for the current cluster.
@@ -162,7 +95,8 @@ public void handleSnapshotRestore(ClusterState clusterState, Metadata.Builder md
* @param clusterState State of the cluster
* @return true if file settings metadata version is exactly 0, false otherwise.
*/
- private boolean shouldRefreshFileState(ClusterState clusterState) {
+ @Override
+ protected boolean shouldRefreshFileState(ClusterState clusterState) {
// We check if the version was reset to 0, and force an update if a file exists. This can happen in situations
// like snapshot restores.
ReservedStateMetadata fileSettingsMetadata = clusterState.metadata().reservedStateMetadata().get(NAMESPACE);
diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/MasterNodeFileWatchingService.java b/server/src/main/java/org/elasticsearch/reservedstate/service/MasterNodeFileWatchingService.java
new file mode 100644
index 000000000000..444cde45d696
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/reservedstate/service/MasterNodeFileWatchingService.java
@@ -0,0 +1,108 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.reservedstate.service;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateListener;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.file.AbstractFileWatchingService;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.attribute.FileTime;
+import java.time.Instant;
+
+public abstract class MasterNodeFileWatchingService extends AbstractFileWatchingService implements ClusterStateListener {
+
+ private static final Logger logger = LogManager.getLogger(MasterNodeFileWatchingService.class);
+
+ private final ClusterService clusterService;
+ private volatile boolean active = false;
+
+ protected MasterNodeFileWatchingService(ClusterService clusterService, Path watchedFile) {
+ super(watchedFile);
+ this.clusterService = clusterService;
+ }
+
+ @Override
+ protected void doStart() {
+ // We start the file watcher when we know we are master from a cluster state change notification.
+ // We need the additional active flag, since cluster state can change after we've shutdown the service
+ // causing the watcher to start again.
+ this.active = Files.exists(watchedFileDir().getParent());
+ if (active == false) {
+ // we don't have a config directory, we can't possibly launch the file settings service
+ return;
+ }
+ if (DiscoveryNode.isMasterNode(clusterService.getSettings())) {
+ clusterService.addListener(this);
+ }
+ }
+
+ @Override
+ protected void doStop() {
+ this.active = false;
+ super.doStop();
+ }
+
+ @Override
+ public final void clusterChanged(ClusterChangedEvent event) {
+ ClusterState clusterState = event.state();
+ if (clusterState.nodes().isLocalNodeElectedMaster()) {
+ synchronized (this) {
+ if (watching() || active == false) {
+ refreshExistingFileStateIfNeeded(clusterState);
+ return;
+ }
+ startWatcher();
+ }
+ } else if (event.previousState().nodes().isLocalNodeElectedMaster()) {
+ stopWatcher();
+ }
+ }
+
+ /**
+ * 'Touches' the settings file so the file watcher will re-processes it.
+ *
+ * The file processing is asynchronous, the cluster state or the file must be already updated such that
+ * the version information in the file is newer than what's already saved as processed in the
+ * cluster state.
+ *
+ * For snapshot restores we first must restore the snapshot and then force a refresh, since the cluster state
+ * metadata version must be reset to 0 and saved in the cluster state.
+ */
+ private void refreshExistingFileStateIfNeeded(ClusterState clusterState) {
+ if (watching()) {
+ if (shouldRefreshFileState(clusterState) && Files.exists(watchedFile())) {
+ try {
+ Files.setLastModifiedTime(watchedFile(), FileTime.from(Instant.now()));
+ } catch (IOException e) {
+ logger.warn("encountered I/O error trying to update file settings timestamp", e);
+ }
+ }
+ }
+ }
+
+ /**
+ * There may be an indication in cluster state that the file we are watching
+ * should be re-processed: for example, after cluster state has been restored
+ * from a snapshot. By default, we do nothing, but this method should be overridden
+ * if different behavior is desired.
+ * @param clusterState State of the cluster
+ * @return false, by default
+ */
+ protected boolean shouldRefreshFileState(ClusterState clusterState) {
+ return false;
+ }
+}
diff --git a/server/src/main/java/org/elasticsearch/rest/RestFeatures.java b/server/src/main/java/org/elasticsearch/rest/RestFeatures.java
new file mode 100644
index 000000000000..73b788d63b2a
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/rest/RestFeatures.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.rest;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.features.FeatureSpecification;
+import org.elasticsearch.features.NodeFeature;
+import org.elasticsearch.rest.action.admin.cluster.RestClusterGetSettingsAction;
+
+import java.util.Map;
+
+public class RestFeatures implements FeatureSpecification {
+ @Override
+ public Map<NodeFeature, Version> getHistoricalFeatures() {
+ return Map.of(RestClusterGetSettingsAction.SUPPORTS_GET_SETTINGS_ACTION, Version.V_8_3_0);
+ }
+}
diff --git a/server/src/main/java/org/elasticsearch/rest/RestResponse.java b/server/src/main/java/org/elasticsearch/rest/RestResponse.java
index 73b24b21e546..55adc67bf18e 100644
--- a/server/src/main/java/org/elasticsearch/rest/RestResponse.java
+++ b/server/src/main/java/org/elasticsearch/rest/RestResponse.java
@@ -36,7 +36,7 @@
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
import static org.elasticsearch.rest.RestController.ELASTIC_PRODUCT_HTTP_HEADER;
-public class RestResponse {
+public final class RestResponse {
public static final String TEXT_CONTENT_TYPE = "text/plain; charset=UTF-8";
@@ -111,7 +111,6 @@ public RestResponse(RestChannel channel, Exception e) throws IOException {
this(channel, ExceptionsHelper.status(e), e);
}
- @SuppressWarnings("this-escape")
public RestResponse(RestChannel channel, RestStatus status, Exception e) throws IOException {
this.status = status;
ToXContent.Params params = channel.request();
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java
index 189bd9c2b955..7748944306e3 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java
@@ -8,16 +8,15 @@
package org.elasticsearch.rest.action.admin.cluster;
-import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsAction;
import org.elasticsearch.action.admin.cluster.settings.RestClusterGetSettingsResponse;
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
import org.elasticsearch.action.support.master.MasterNodeReadRequest;
import org.elasticsearch.client.internal.node.NodeClient;
-import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
+import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.Scope;
@@ -27,28 +26,30 @@
import java.io.IOException;
import java.util.List;
import java.util.Set;
-import java.util.function.Supplier;
+import java.util.function.Predicate;
import static org.elasticsearch.rest.RestRequest.Method.GET;
@ServerlessScope(Scope.INTERNAL)
public class RestClusterGetSettingsAction extends BaseRestHandler {
+ public static final NodeFeature SUPPORTS_GET_SETTINGS_ACTION = new NodeFeature("rest.get_settings_action");
+
private final Settings settings;
private final ClusterSettings clusterSettings;
private final SettingsFilter settingsFilter;
- private final Supplier<DiscoveryNodes> nodesInCluster;
+ private final Predicate<NodeFeature> clusterSupportsFeature;
public RestClusterGetSettingsAction(
Settings settings,
ClusterSettings clusterSettings,
SettingsFilter settingsFilter,
- Supplier<DiscoveryNodes> nodesInCluster
+ Predicate<NodeFeature> clusterSupportsFeature
) {
this.settings = settings;
this.clusterSettings = clusterSettings;
this.settingsFilter = settingsFilter;
- this.nodesInCluster = nodesInCluster;
+ this.clusterSupportsFeature = clusterSupportsFeature;
}
@Override
@@ -70,7 +71,7 @@ private static void setUpRequestParams(MasterNodeReadRequest<?> clusterRequest,
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
final boolean renderDefaults = request.paramAsBoolean("include_defaults", false);
- if (nodesInCluster.get().getMinNodeVersion().before(Version.V_8_3_0)) {
+ if (clusterSupportsFeature.test(SUPPORTS_GET_SETTINGS_ACTION) == false) {
return prepareLegacyRequest(request, client, renderDefaults);
}
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/AliasesNotFoundException.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/AliasesNotFoundException.java
index 4ebe5350e055..e46468205da6 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/AliasesNotFoundException.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/AliasesNotFoundException.java
@@ -13,9 +13,8 @@
import java.io.IOException;
import java.util.Arrays;
-public class AliasesNotFoundException extends ResourceNotFoundException {
+public final class AliasesNotFoundException extends ResourceNotFoundException {
- @SuppressWarnings("this-escape")
public AliasesNotFoundException(String... names) {
super("aliases " + Arrays.toString(names) + " missing");
this.setResources("aliases", names);
diff --git a/server/src/main/java/org/elasticsearch/script/field/WriteField.java b/server/src/main/java/org/elasticsearch/script/field/WriteField.java
index 6a50434b4004..a420c8c7c085 100644
--- a/server/src/main/java/org/elasticsearch/script/field/WriteField.java
+++ b/server/src/main/java/org/elasticsearch/script/field/WriteField.java
@@ -23,16 +23,15 @@
import java.util.function.Predicate;
import java.util.function.Supplier;
-public class WriteField implements Field