From d2e667004596544bbbb594c7d42e215483caf9c5 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 19 Aug 2024 11:57:07 +0100 Subject: [PATCH 01/20] Move repo analyzer to its own package (#111963) In preparation for adding more things to the blobstore testkit, this commit moves the repository analyzer implementation from `o.e.r.blobstore.testkit` to `o.e.r.blobstore.testkit.analyze`. --- .../AzureRepositoryAnalysisRestIT.java} | 4 ++-- .../GCSRepositoryAnalysisRestIT.java} | 4 ++-- .../AbstractHdfsRepositoryAnalysisRestIT.java} | 4 ++-- .../HdfsRepositoryAnalysisRestIT.java} | 4 ++-- .../SecureHdfsRepositoryAnalysisRestIT.java} | 4 ++-- .../MinioRepositoryAnalysisRestIT.java} | 4 ++-- x-pack/plugin/snapshot-repo-test-kit/qa/rest/build.gradle | 2 +- .../FsRepositoryAnalysisRestIT.java} | 5 ++--- .../S3RepositoryAnalysisRestIT.java} | 4 ++-- .../blobstore/testkit/{ => analyze}/BytesRegister.java | 2 +- .../{ => analyze}/RepositoryAnalysisFailureIT.java | 7 ++++--- .../{ => analyze}/RepositoryAnalysisSuccessIT.java | 7 ++++--- .../blobstore/testkit/SnapshotRepositoryTestKit.java | 5 ++++- .../testkit/{ => analyze}/BlobAnalyzeAction.java | 2 +- .../testkit/{ => analyze}/BlobWriteAbortedException.java | 2 +- .../{ => analyze}/ContendedRegisterAnalyzeAction.java | 2 +- .../testkit/{ => analyze}/GetBlobChecksumAction.java | 2 +- .../testkit/{ => analyze}/RandomBlobContent.java | 2 +- .../{ => analyze}/RandomBlobContentBytesReference.java | 2 +- .../testkit/{ => analyze}/RandomBlobContentStream.java | 2 +- .../testkit/{ => analyze}/RepositoryAnalyzeAction.java | 8 ++++---- .../{ => analyze}/RepositoryPerformanceSummary.java | 2 +- .../{ => analyze}/RestRepositoryAnalyzeAction.java | 2 +- .../{ => analyze}/UncontendedRegisterAnalyzeAction.java | 6 +++--- .../AbstractRepositoryAnalysisRestTestCase.java} | 4 ++-- .../RandomBlobContentBytesReferenceTests.java | 4 ++-- .../{ => analyze}/RandomBlobContentStreamTests.java | 4 ++-- .../{ => 
analyze}/RepositoryAnalyzeActionTests.java | 2 +- 28 files changed, 53 insertions(+), 49 deletions(-) rename x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/{AzureSnapshotRepoTestKitIT.java => analyze/AzureRepositoryAnalysisRestIT.java} (97%) rename x-pack/plugin/snapshot-repo-test-kit/qa/gcs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/{GCSSnapshotRepoTestKitIT.java => analyze/GCSRepositoryAnalysisRestIT.java} (95%) rename x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/{AbstractHdfsSnapshotRepoTestKitIT.java => analyze/AbstractHdfsRepositoryAnalysisRestIT.java} (86%) rename x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/{HdfsSnapshotRepoTestKitIT.java => analyze/HdfsRepositoryAnalysisRestIT.java} (90%) rename x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/{SecureHdfsSnapshotRepoTestKitIT.java => analyze/SecureHdfsRepositoryAnalysisRestIT.java} (93%) rename x-pack/plugin/snapshot-repo-test-kit/qa/minio/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/{MinioSnapshotRepoTestKitIT.java => analyze/MinioRepositoryAnalysisRestIT.java} (93%) rename x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/java/org/elasticsearch/repositories/blobstore/testkit/{rest/FsSnapshotRepoTestKitIT.java => analyze/FsRepositoryAnalysisRestIT.java} (71%) rename x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/{S3SnapshotRepoTestKitIT.java => analyze/S3RepositoryAnalysisRestIT.java} (94%) rename x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/BytesRegister.java (93%) rename 
x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/RepositoryAnalysisFailureIT.java (98%) rename x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/RepositoryAnalysisSuccessIT.java (98%) rename x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/BlobAnalyzeAction.java (99%) rename x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/BlobWriteAbortedException.java (85%) rename x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/ContendedRegisterAnalyzeAction.java (99%) rename x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/GetBlobChecksumAction.java (99%) rename x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/RandomBlobContent.java (98%) rename x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/RandomBlobContentBytesReference.java (97%) rename x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/RandomBlobContentStream.java (97%) rename x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/RepositoryAnalyzeAction.java (99%) rename x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/RepositoryPerformanceSummary.java (98%) rename x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/RestRepositoryAnalyzeAction.java (98%) rename 
x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/UncontendedRegisterAnalyzeAction.java (96%) rename x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/{AbstractSnapshotRepoTestKitRestTestCase.java => analyze/AbstractRepositoryAnalysisRestTestCase.java} (90%) rename x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/RandomBlobContentBytesReferenceTests.java (91%) rename x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/RandomBlobContentStreamTests.java (97%) rename x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/{ => analyze}/RepositoryAnalyzeActionTests.java (98%) diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/AzureSnapshotRepoTestKitIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AzureRepositoryAnalysisRestIT.java similarity index 97% rename from x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/AzureSnapshotRepoTestKitIT.java rename to x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AzureRepositoryAnalysisRestIT.java index 154b5bec54418..ecc8401e1d79a 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/AzureSnapshotRepoTestKitIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AzureRepositoryAnalysisRestIT.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 
2.0. */ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import fixture.azure.AzureHttpFixture; @@ -25,7 +25,7 @@ import static org.hamcrest.Matchers.blankOrNullString; import static org.hamcrest.Matchers.not; -public class AzureSnapshotRepoTestKitIT extends AbstractSnapshotRepoTestKitRestTestCase { +public class AzureRepositoryAnalysisRestIT extends AbstractRepositoryAnalysisRestTestCase { private static final boolean USE_FIXTURE = Booleans.parseBoolean(System.getProperty("test.azure.fixture", "true")); private static final boolean USE_HTTPS_FIXTURE = USE_FIXTURE && ESTestCase.inFipsJvm() == false; // TODO when https://github.com/elastic/elasticsearch/issues/111532 addressed, use a HTTPS fixture in FIPS mode too diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/gcs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/GCSSnapshotRepoTestKitIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/gcs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/GCSRepositoryAnalysisRestIT.java similarity index 95% rename from x-pack/plugin/snapshot-repo-test-kit/qa/gcs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/GCSSnapshotRepoTestKitIT.java rename to x-pack/plugin/snapshot-repo-test-kit/qa/gcs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/GCSRepositoryAnalysisRestIT.java index 95b6f4aed5221..7f7540d138825 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/gcs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/GCSSnapshotRepoTestKitIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/gcs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/GCSRepositoryAnalysisRestIT.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import fixture.gcs.GoogleCloudStorageHttpFixture; import fixture.gcs.TestUtils; @@ -23,7 +23,7 @@ import static org.hamcrest.Matchers.blankOrNullString; import static org.hamcrest.Matchers.not; -public class GCSSnapshotRepoTestKitIT extends AbstractSnapshotRepoTestKitRestTestCase { +public class GCSRepositoryAnalysisRestIT extends AbstractRepositoryAnalysisRestTestCase { private static final boolean USE_FIXTURE = Booleans.parseBoolean(System.getProperty("test.google.fixture", "true")); private static GoogleCloudStorageHttpFixture fixture = new GoogleCloudStorageHttpFixture(USE_FIXTURE, "bucket", "o/oauth2/token"); diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/AbstractHdfsSnapshotRepoTestKitIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AbstractHdfsRepositoryAnalysisRestIT.java similarity index 86% rename from x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/AbstractHdfsSnapshotRepoTestKitIT.java rename to x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AbstractHdfsRepositoryAnalysisRestIT.java index 2810c4801e8dd..2aec22476d6cc 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/AbstractHdfsSnapshotRepoTestKitIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AbstractHdfsRepositoryAnalysisRestIT.java @@ -5,14 +5,14 @@ * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.elasticsearch.common.settings.Settings; import static org.hamcrest.Matchers.blankOrNullString; import static org.hamcrest.Matchers.not; -public abstract class AbstractHdfsSnapshotRepoTestKitIT extends AbstractSnapshotRepoTestKitRestTestCase { +public abstract class AbstractHdfsRepositoryAnalysisRestIT extends AbstractRepositoryAnalysisRestTestCase { @Override protected String repositoryType() { diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/HdfsSnapshotRepoTestKitIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/HdfsRepositoryAnalysisRestIT.java similarity index 90% rename from x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/HdfsSnapshotRepoTestKitIT.java rename to x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/HdfsRepositoryAnalysisRestIT.java index e9787ecdce854..d60497949ff61 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/HdfsSnapshotRepoTestKitIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/HdfsRepositoryAnalysisRestIT.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; @@ -17,7 +17,7 @@ import org.junit.rules.TestRule; @ThreadLeakFilters(filters = { HdfsClientThreadLeakFilter.class }) -public class HdfsSnapshotRepoTestKitIT extends AbstractHdfsSnapshotRepoTestKitIT { +public class HdfsRepositoryAnalysisRestIT extends AbstractHdfsRepositoryAnalysisRestIT { public static HdfsFixture hdfsFixture = new HdfsFixture(); diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/SecureHdfsSnapshotRepoTestKitIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/SecureHdfsRepositoryAnalysisRestIT.java similarity index 93% rename from x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/SecureHdfsSnapshotRepoTestKitIT.java rename to x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/SecureHdfsRepositoryAnalysisRestIT.java index 6d599e41e3b9f..dd388c0a79776 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/SecureHdfsSnapshotRepoTestKitIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/SecureHdfsRepositoryAnalysisRestIT.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; @@ -22,7 +22,7 @@ import org.junit.rules.TestRule; @ThreadLeakFilters(filters = { HdfsClientThreadLeakFilter.class, TestContainersThreadFilter.class }) -public class SecureHdfsSnapshotRepoTestKitIT extends AbstractHdfsSnapshotRepoTestKitIT { +public class SecureHdfsRepositoryAnalysisRestIT extends AbstractHdfsRepositoryAnalysisRestIT { public static Krb5kDcContainer krb5Fixture = new Krb5kDcContainer(); diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/minio/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/MinioSnapshotRepoTestKitIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/minio/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/MinioRepositoryAnalysisRestIT.java similarity index 93% rename from x-pack/plugin/snapshot-repo-test-kit/qa/minio/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/MinioSnapshotRepoTestKitIT.java rename to x-pack/plugin/snapshot-repo-test-kit/qa/minio/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/MinioRepositoryAnalysisRestIT.java index 3e58a8d89ff31..b0068bd7bfdaf 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/minio/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/MinioSnapshotRepoTestKitIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/minio/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/MinioRepositoryAnalysisRestIT.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; @@ -18,7 +18,7 @@ import org.junit.rules.TestRule; @ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) -public class MinioSnapshotRepoTestKitIT extends AbstractSnapshotRepoTestKitRestTestCase { +public class MinioRepositoryAnalysisRestIT extends AbstractRepositoryAnalysisRestTestCase { public static final MinioTestContainer minioFixture = new MinioTestContainer(); diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/rest/build.gradle b/x-pack/plugin/snapshot-repo-test-kit/qa/rest/build.gradle index 17df249b08cf6..8a5dbca7dd0b2 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/rest/build.gradle +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/rest/build.gradle @@ -21,7 +21,7 @@ testClusters.matching { it.name == "yamlRestTest" }.configureEach { } tasks.named('yamlRestTestTestingConventions').configure { - baseClass 'org.elasticsearch.repositories.blobstore.testkit.AbstractSnapshotRepoTestKitRestTestCase' + baseClass 'org.elasticsearch.repositories.blobstore.testkit.analyze.AbstractRepositoryAnalysisRestTestCase' baseClass 'org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase' } diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/java/org/elasticsearch/repositories/blobstore/testkit/rest/FsSnapshotRepoTestKitIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/FsRepositoryAnalysisRestIT.java similarity index 71% rename from x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/java/org/elasticsearch/repositories/blobstore/testkit/rest/FsSnapshotRepoTestKitIT.java rename to x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/FsRepositoryAnalysisRestIT.java index 
77dfb3902805a..7151b6e80a4d5 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/java/org/elasticsearch/repositories/blobstore/testkit/rest/FsSnapshotRepoTestKitIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/rest/src/yamlRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/FsRepositoryAnalysisRestIT.java @@ -5,13 +5,12 @@ * 2.0. */ -package org.elasticsearch.repositories.blobstore.testkit.rest; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.repositories.blobstore.testkit.AbstractSnapshotRepoTestKitRestTestCase; import org.elasticsearch.repositories.fs.FsRepository; -public class FsSnapshotRepoTestKitIT extends AbstractSnapshotRepoTestKitRestTestCase { +public class FsRepositoryAnalysisRestIT extends AbstractRepositoryAnalysisRestTestCase { @Override protected String repositoryType() { diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/S3SnapshotRepoTestKitIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/S3RepositoryAnalysisRestIT.java similarity index 94% rename from x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/S3SnapshotRepoTestKitIT.java rename to x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/S3RepositoryAnalysisRestIT.java index c38bd1204189f..8986cf1059191 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/S3SnapshotRepoTestKitIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/s3/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/S3RepositoryAnalysisRestIT.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except 
in compliance with the Elastic License * 2.0. */ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import fixture.s3.S3HttpFixture; @@ -18,7 +18,7 @@ import static org.hamcrest.Matchers.blankOrNullString; import static org.hamcrest.Matchers.not; -public class S3SnapshotRepoTestKitIT extends AbstractSnapshotRepoTestKitRestTestCase { +public class S3RepositoryAnalysisRestIT extends AbstractRepositoryAnalysisRestTestCase { static final boolean USE_FIXTURE = Boolean.parseBoolean(System.getProperty("tests.use.fixture", "true")); diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/BytesRegister.java b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/BytesRegister.java similarity index 93% rename from x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/BytesRegister.java rename to x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/BytesRegister.java index 4303fff673359..3f5e406ac797b 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/BytesRegister.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/BytesRegister.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisFailureIT.java b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryAnalysisFailureIT.java similarity index 98% rename from x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisFailureIT.java rename to x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryAnalysisFailureIT.java index 73a90f247810e..e61f883abd60f 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisFailureIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryAnalysisFailureIT.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; @@ -40,6 +40,7 @@ import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.repositories.RepositoryVerificationException; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.repositories.blobstore.testkit.SnapshotRepositoryTestKit; import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; @@ -66,8 +67,8 @@ import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING; import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.MAX_RESTORE_BYTES_PER_SEC; import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.MAX_SNAPSHOT_BYTES_PER_SEC; -import static org.elasticsearch.repositories.blobstore.testkit.ContendedRegisterAnalyzeAction.bytesFromLong; -import static org.elasticsearch.repositories.blobstore.testkit.ContendedRegisterAnalyzeAction.longFromBytes; +import static org.elasticsearch.repositories.blobstore.testkit.analyze.ContendedRegisterAnalyzeAction.bytesFromLong; +import static org.elasticsearch.repositories.blobstore.testkit.analyze.ContendedRegisterAnalyzeAction.longFromBytes; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.anyOf; diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisSuccessIT.java b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryAnalysisSuccessIT.java similarity index 98% rename from 
x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisSuccessIT.java rename to x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryAnalysisSuccessIT.java index e4d9bf9041b4a..bb452ad2a64ce 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisSuccessIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryAnalysisSuccessIT.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.metadata.RepositoryMetadata; @@ -36,6 +36,7 @@ import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.repositories.blobstore.testkit.SnapshotRepositoryTestKit; import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ObjectPath; @@ -61,8 +62,8 @@ import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING; import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.MAX_RESTORE_BYTES_PER_SEC; import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.MAX_SNAPSHOT_BYTES_PER_SEC; -import static org.elasticsearch.repositories.blobstore.testkit.ContendedRegisterAnalyzeAction.longFromBytes; -import static org.elasticsearch.repositories.blobstore.testkit.RepositoryAnalysisFailureIT.isContendedRegisterKey; +import static 
org.elasticsearch.repositories.blobstore.testkit.analyze.ContendedRegisterAnalyzeAction.longFromBytes; +import static org.elasticsearch.repositories.blobstore.testkit.analyze.RepositoryAnalysisFailureIT.isContendedRegisterKey; import static org.elasticsearch.test.XContentTestUtils.convertToMap; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.allOf; diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/SnapshotRepositoryTestKit.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/SnapshotRepositoryTestKit.java index 124174a2a025b..04d59906e6db3 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/SnapshotRepositoryTestKit.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/SnapshotRepositoryTestKit.java @@ -20,6 +20,8 @@ import org.elasticsearch.features.NodeFeature; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.blobstore.testkit.analyze.RepositoryAnalyzeAction; +import org.elasticsearch.repositories.blobstore.testkit.analyze.RestRepositoryAnalyzeAction; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.xcontent.XContentBuilder; @@ -51,7 +53,8 @@ public List getRestHandlers( return List.of(new RestRepositoryAnalyzeAction()); } - static void humanReadableNanos(XContentBuilder builder, String rawFieldName, String readableFieldName, long nanos) throws IOException { + public static void humanReadableNanos(XContentBuilder builder, String rawFieldName, String readableFieldName, long nanos) + throws IOException { assert rawFieldName.equals(readableFieldName) == false : rawFieldName + " vs " + readableFieldName; if 
(builder.humanReadable()) { diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/BlobAnalyzeAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/BlobAnalyzeAction.java similarity index 99% rename from x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/BlobAnalyzeAction.java rename to x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/BlobAnalyzeAction.java index aa0cf3e3cfc1b..6007968d7cb4d 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/BlobAnalyzeAction.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/BlobAnalyzeAction.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/BlobWriteAbortedException.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/BlobWriteAbortedException.java similarity index 85% rename from x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/BlobWriteAbortedException.java rename to x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/BlobWriteAbortedException.java index 11c73993a3e6e..8a7bbb7255c5a 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/BlobWriteAbortedException.java +++ 
b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/BlobWriteAbortedException.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; public class BlobWriteAbortedException extends RuntimeException { public BlobWriteAbortedException() { diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/ContendedRegisterAnalyzeAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/ContendedRegisterAnalyzeAction.java similarity index 99% rename from x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/ContendedRegisterAnalyzeAction.java rename to x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/ContendedRegisterAnalyzeAction.java index 40cb4a45a0339..f527a46371641 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/ContendedRegisterAnalyzeAction.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/ContendedRegisterAnalyzeAction.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/GetBlobChecksumAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/GetBlobChecksumAction.java similarity index 99% rename from x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/GetBlobChecksumAction.java rename to x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/GetBlobChecksumAction.java index f706ff79bf073..816f9e860a33a 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/GetBlobChecksumAction.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/GetBlobChecksumAction.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContent.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContent.java similarity index 98% rename from x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContent.java rename to x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContent.java index aa9125f214f58..d5061b303f93d 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContent.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContent.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.elasticsearch.repositories.RepositoryVerificationException; diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContentBytesReference.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContentBytesReference.java similarity index 97% rename from x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContentBytesReference.java rename to x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContentBytesReference.java index 44627000a2de9..eee40992cb0d7 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContentBytesReference.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContentBytesReference.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContentStream.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContentStream.java similarity index 97% rename from x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContentStream.java rename to x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContentStream.java index c6163a7ffd82d..15fa370c5fe0a 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContentStream.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContentStream.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import java.io.InputStream; diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryAnalyzeAction.java similarity index 99% rename from x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java rename to x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryAnalyzeAction.java index 30c2d0a89e0ee..5ced0176a4f81 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryAnalyzeAction.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -83,10 +83,10 @@ import java.util.stream.IntStream; import static org.elasticsearch.core.Strings.format; -import static org.elasticsearch.repositories.blobstore.testkit.BlobAnalyzeAction.MAX_ATOMIC_WRITE_SIZE; -import static org.elasticsearch.repositories.blobstore.testkit.ContendedRegisterAnalyzeAction.bytesFromLong; -import static org.elasticsearch.repositories.blobstore.testkit.ContendedRegisterAnalyzeAction.longFromBytes; import static org.elasticsearch.repositories.blobstore.testkit.SnapshotRepositoryTestKit.humanReadableNanos; +import static org.elasticsearch.repositories.blobstore.testkit.analyze.BlobAnalyzeAction.MAX_ATOMIC_WRITE_SIZE; +import static org.elasticsearch.repositories.blobstore.testkit.analyze.ContendedRegisterAnalyzeAction.bytesFromLong; +import static org.elasticsearch.repositories.blobstore.testkit.analyze.ContendedRegisterAnalyzeAction.longFromBytes; /** * Action which distributes a bunch of {@link BlobAnalyzeAction}s over the nodes in the cluster, with limited concurrency, and collects diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryPerformanceSummary.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryPerformanceSummary.java similarity index 98% rename from x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryPerformanceSummary.java rename to x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryPerformanceSummary.java index 3ee8805480023..c2625285a8912 100644 --- 
a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryPerformanceSummary.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryPerformanceSummary.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RestRepositoryAnalyzeAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RestRepositoryAnalyzeAction.java similarity index 98% rename from x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RestRepositoryAnalyzeAction.java rename to x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RestRepositoryAnalyzeAction.java index 2a549db8b3255..b0f6b01936ffa 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RestRepositoryAnalyzeAction.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RestRepositoryAnalyzeAction.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/UncontendedRegisterAnalyzeAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/UncontendedRegisterAnalyzeAction.java similarity index 96% rename from x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/UncontendedRegisterAnalyzeAction.java rename to x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/UncontendedRegisterAnalyzeAction.java index 1986b47e3188c..23c25e466b917 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/UncontendedRegisterAnalyzeAction.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/analyze/UncontendedRegisterAnalyzeAction.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -38,8 +38,8 @@ import java.io.IOException; import java.util.Map; -import static org.elasticsearch.repositories.blobstore.testkit.ContendedRegisterAnalyzeAction.bytesFromLong; -import static org.elasticsearch.repositories.blobstore.testkit.ContendedRegisterAnalyzeAction.longFromBytes; +import static org.elasticsearch.repositories.blobstore.testkit.analyze.ContendedRegisterAnalyzeAction.bytesFromLong; +import static org.elasticsearch.repositories.blobstore.testkit.analyze.ContendedRegisterAnalyzeAction.longFromBytes; class UncontendedRegisterAnalyzeAction extends HandledTransportAction { diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/AbstractSnapshotRepoTestKitRestTestCase.java b/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AbstractRepositoryAnalysisRestTestCase.java similarity index 90% rename from x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/AbstractSnapshotRepoTestKitRestTestCase.java rename to x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AbstractRepositoryAnalysisRestTestCase.java index 3af8c118803a7..2c96003f7e3d3 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/AbstractSnapshotRepoTestKitRestTestCase.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AbstractRepositoryAnalysisRestTestCase.java @@ -5,14 +5,14 @@ * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.apache.http.client.methods.HttpPost; import org.elasticsearch.client.Request; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.rest.ESRestTestCase; -public abstract class AbstractSnapshotRepoTestKitRestTestCase extends ESRestTestCase { +public abstract class AbstractRepositoryAnalysisRestTestCase extends ESRestTestCase { protected abstract String repositoryType(); diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContentBytesReferenceTests.java b/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContentBytesReferenceTests.java similarity index 91% rename from x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContentBytesReferenceTests.java rename to x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContentBytesReferenceTests.java index c85b634083faf..29a6253c031d8 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContentBytesReferenceTests.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContentBytesReferenceTests.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.elasticsearch.test.ESTestCase; @@ -13,7 +13,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.zip.CRC32; -import static org.elasticsearch.repositories.blobstore.testkit.RandomBlobContent.BUFFER_SIZE; +import static org.elasticsearch.repositories.blobstore.testkit.analyze.RandomBlobContent.BUFFER_SIZE; import static org.hamcrest.Matchers.equalTo; public class RandomBlobContentBytesReferenceTests extends ESTestCase { diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContentStreamTests.java b/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContentStreamTests.java similarity index 97% rename from x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContentStreamTests.java rename to x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContentStreamTests.java index 6c353e0937a33..1854d98f7ec79 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/RandomBlobContentStreamTests.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RandomBlobContentStreamTests.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.elasticsearch.repositories.RepositoryVerificationException; import org.elasticsearch.test.ESTestCase; @@ -14,7 +14,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.zip.CRC32; -import static org.elasticsearch.repositories.blobstore.testkit.RandomBlobContent.BUFFER_SIZE; +import static org.elasticsearch.repositories.blobstore.testkit.analyze.RandomBlobContent.BUFFER_SIZE; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeActionTests.java b/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryAnalyzeActionTests.java similarity index 98% rename from x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeActionTests.java rename to x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryAnalyzeActionTests.java index 0d1bdc86002b4..44770e68d714b 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeActionTests.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/test/java/org/elasticsearch/repositories/blobstore/testkit/analyze/RepositoryAnalyzeActionTests.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.repositories.blobstore.testkit; +package org.elasticsearch.repositories.blobstore.testkit.analyze; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.test.ESTestCase; From ca6d41ce2093989dad829ddfe053a1194e0d0b7a Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 19 Aug 2024 12:04:55 +0100 Subject: [PATCH 02/20] Fail `indexDocs()` on rejection (#111962) In 9dc59e29 we relaxed the `indexDocs()` test utility to retry on rejections caused by exceeding the write queue length limit, but then we massively relaxed this limit in #59559. We should not be seeing such rejections any more, so we can revert this special handling and strengthen the tests to assert that the indexing process encounters no failures at all. --- .../elasticsearch/test/ESIntegTestCase.java | 38 +------------------ 1 file changed, 2 insertions(+), 36 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index aad3dcc457241..fa686a0bc753a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -21,7 +21,6 @@ import org.apache.lucene.search.Sort; import org.apache.lucene.search.TotalHits; import org.apache.lucene.tests.util.LuceneTestCase; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; @@ -101,7 +100,6 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.MockBigArrays; -import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.ChunkedToXContent; import 
org.elasticsearch.common.xcontent.XContentHelper; @@ -109,7 +107,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.core.Tuple; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.gateway.PersistedClusterStateService; @@ -186,7 +183,6 @@ import java.util.Random; import java.util.Set; import java.util.concurrent.Callable; -import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; @@ -212,7 +208,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.emptyIterable; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; @@ -1735,7 +1730,6 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma } } Collections.shuffle(builders, random()); - final CopyOnWriteArrayList> errors = new CopyOnWriteArrayList<>(); List inFlightAsyncOperations = new ArrayList<>(); // If you are indexing just a few documents then frequently do it one at a time. If many then frequently in bulk. 
final String[] indicesArray = indices.toArray(new String[] {}); @@ -1744,7 +1738,7 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), true, false); for (IndexRequestBuilder indexRequestBuilder : builders) { indexRequestBuilder.execute( - new PayloadLatchedActionListener<>(indexRequestBuilder, newLatch(inFlightAsyncOperations), errors) + new LatchedActionListener(newLatch(inFlightAsyncOperations)).delegateResponse((l, e) -> fail(e)) ); postIndexAsyncActions(indicesArray, inFlightAsyncOperations, maybeFlush); } @@ -1771,19 +1765,8 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma } } for (CountDownLatch operation : inFlightAsyncOperations) { - operation.await(); - } - final List actualErrors = new ArrayList<>(); - for (Tuple tuple : errors) { - Throwable t = ExceptionsHelper.unwrapCause(tuple.v2()); - if (t instanceof EsRejectedExecutionException) { - logger.debug("Error indexing doc: " + t.getMessage() + ", reindexing."); - tuple.v1().get(); // re-index if rejected - } else { - actualErrors.add(tuple.v2()); - } + safeAwait(operation); } - assertThat(actualErrors, emptyIterable()); if (bogusIds.isEmpty() == false) { // delete the bogus types again - it might trigger merges or at least holes in the segments and enforces deleted docs! 
for (List doc : bogusIds) { @@ -1957,23 +1940,6 @@ protected void addError(Exception e) {} } - private class PayloadLatchedActionListener extends LatchedActionListener { - private final CopyOnWriteArrayList> errors; - private final T builder; - - PayloadLatchedActionListener(T builder, CountDownLatch latch, CopyOnWriteArrayList> errors) { - super(latch); - this.errors = errors; - this.builder = builder; - } - - @Override - protected void addError(Exception e) { - errors.add(new Tuple<>(builder, e)); - } - - } - /** * Clears the given scroll Ids */ From 7bf730a88f5b1e324d3afbd077cc400ab092de92 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?= Date: Mon, 19 Aug 2024 15:55:58 +0200 Subject: [PATCH 03/20] Fix: HierarchyCircuitBreakerTelemetryTests testCircuitBreakerTripCountMetric failing (#111831) * Cleanup code and teardown for testCircuitBreakerTripCountMetric * Move to a more appropriate location --- muted-tests.yml | 3 - .../HierarchyCircuitBreakerTelemetryIT.java} | 101 +++++------------- 2 files changed, 24 insertions(+), 80 deletions(-) rename server/src/{test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerTelemetryTests.java => internalClusterTest/java/org/elasticsearch/indices/memory/breaker/HierarchyCircuitBreakerTelemetryIT.java} (58%) diff --git a/muted-tests.yml b/muted-tests.yml index 22adc4a8c44b5..dd4dd2c7f2ec7 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -137,9 +137,6 @@ tests: - class: org.elasticsearch.xpack.restart.CoreFullClusterRestartIT method: testSnapshotRestore {cluster=UPGRADED} issue: https://github.com/elastic/elasticsearch/issues/111799 -- class: org.elasticsearch.indices.breaker.HierarchyCircuitBreakerTelemetryTests - method: testCircuitBreakerTripCountMetric - issue: https://github.com/elastic/elasticsearch/issues/111778 - class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT method: test {comparison.RangeVersion SYNC} issue: 
https://github.com/elastic/elasticsearch/issues/111814 diff --git a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerTelemetryTests.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/HierarchyCircuitBreakerTelemetryIT.java similarity index 58% rename from server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerTelemetryTests.java rename to server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/HierarchyCircuitBreakerTelemetryIT.java index 2cbe1202520df..ff2117ea93bb9 100644 --- a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerTelemetryTests.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/HierarchyCircuitBreakerTelemetryIT.java @@ -6,25 +6,23 @@ * Side Public License, v 1. */ -package org.elasticsearch.indices.breaker; +package org.elasticsearch.indices.memory.breaker; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.indices.breaker.CircuitBreakerMetrics; +import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.telemetry.Measurement; -import org.elasticsearch.telemetry.RecordingInstruments; -import org.elasticsearch.telemetry.RecordingMeterRegistry; import org.elasticsearch.telemetry.TestTelemetryPlugin; -import org.elasticsearch.telemetry.metric.LongCounter; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matchers; +import org.junit.After; -import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Map; 
@@ -41,54 +39,11 @@ import static org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0, supportsDedicatedMasters = true) -public class HierarchyCircuitBreakerTelemetryTests extends ESIntegTestCase { +public class HierarchyCircuitBreakerTelemetryIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return List.of(TestCircuitBreakerTelemetryPlugin.class); - } - - public static class TestCircuitBreakerTelemetryPlugin extends TestTelemetryPlugin { - protected final MeterRegistry meter = new RecordingMeterRegistry() { - private final LongCounter tripCount = new RecordingInstruments.RecordingLongCounter( - CircuitBreakerMetrics.ES_BREAKER_TRIP_COUNT_TOTAL, - recorder - ) { - @Override - public void incrementBy(long inc) { - throw new UnsupportedOperationException(); - } - - @Override - public void incrementBy(long inc, Map attributes) { - throw new UnsupportedOperationException(); - } - }; - - @Override - protected LongCounter buildLongCounter(String name, String description, String unit) { - if (name.equals(tripCount.getName())) { - return tripCount; - } - throw new IllegalArgumentException("Unknown counter metric name [" + name + "]"); - } - - @Override - public LongCounter registerLongCounter(String name, String description, String unit) { - assertCircuitBreakerName(name); - return super.registerLongCounter(name, description, unit); - } - - @Override - public LongCounter getLongCounter(String name) { - assertCircuitBreakerName(name); - return super.getLongCounter(name); - } - - private void assertCircuitBreakerName(final String name) { - assertThat(name, Matchers.oneOf(CircuitBreakerMetrics.ES_BREAKER_TRIP_COUNT_TOTAL)); - } - }; + return List.of(TestTelemetryPlugin.class); } public void testCircuitBreakerTripCountMetric() { @@ -142,37 +97,29 @@ public void testCircuitBreakerTripCountMetric() { 
fail("Expected exception not thrown"); } - private List getMeasurements(String dataNodeName) { - final TestTelemetryPlugin dataNodeTelemetryPlugin = internalCluster().getInstance(PluginsService.class, dataNodeName) - .filterPlugins(TestCircuitBreakerTelemetryPlugin.class) + @After + public void resetClusterSetting() { + final var circuitBreakerSettings = Settings.builder() + .putNull(FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey()) + .putNull(FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey()) + .putNull(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey()) + .putNull(REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey()) + .putNull(IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey()) + .putNull(IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey()) + .putNull(TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey()) + .putNull(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey()); + updateClusterSettings(circuitBreakerSettings); + } + + private List getMeasurements(String nodeName) { + final TestTelemetryPlugin telemetryPlugin = internalCluster().getInstance(PluginsService.class, nodeName) + .filterPlugins(TestTelemetryPlugin.class) .toList() .get(0); return Measurement.combine( - Stream.of(dataNodeTelemetryPlugin.getLongCounterMeasurement(CircuitBreakerMetrics.ES_BREAKER_TRIP_COUNT_TOTAL).stream()) + Stream.of(telemetryPlugin.getLongCounterMeasurement(CircuitBreakerMetrics.ES_BREAKER_TRIP_COUNT_TOTAL).stream()) .flatMap(Function.identity()) .toList() ); } - - // Make sure circuit breaker telemetry on trip count reports the same values as circuit breaker stats - private void assertCircuitBreakerTripCount( - final HierarchyCircuitBreakerService circuitBreakerService, - final String circuitBreakerName, - int firstBytesEstimate, - int secondBytesEstimate, - long expectedTripCountValue - ) { - try { - circuitBreakerService.getBreaker(circuitBreakerName).addEstimateBytesAndMaybeBreak(firstBytesEstimate, randomAlphaOfLength(5)); - 
circuitBreakerService.getBreaker(circuitBreakerName).addEstimateBytesAndMaybeBreak(secondBytesEstimate, randomAlphaOfLength(5)); - } catch (final CircuitBreakingException cbex) { - final CircuitBreakerStats circuitBreakerStats = Arrays.stream(circuitBreakerService.stats().getAllStats()) - .filter(stats -> circuitBreakerName.equals(stats.getName())) - .findAny() - .get(); - assertThat(circuitBreakerService.getBreaker(circuitBreakerName).getTrippedCount(), Matchers.equalTo(expectedTripCountValue)); - assertThat(circuitBreakerStats.getTrippedCount(), Matchers.equalTo(expectedTripCountValue)); - } - } - } From 8b0a1aa7ebac47af865f4fbc732cc4a09835906a Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Mon, 19 Aug 2024 16:14:09 +0200 Subject: [PATCH 04/20] [cache] Support async RangeMissingHandler callbacks (#111340) (#111896) Change `fillCacheRange` method to accept a completion listener that must be called by `RangeMissingHandler` implementations when they finish fetching data. By doing so, we support asynchronously fetching the data from a third party storage. We also support asynchronous `SourceInputStreamFactory` for reading gaps from the storage. 
--- .../shared/SharedBlobCacheService.java | 101 +++++--- .../shared/SharedBlobCacheServiceTests.java | 216 ++++++++++++------ .../store/input/FrozenIndexInput.java | 59 ++--- 3 files changed, 253 insertions(+), 123 deletions(-) diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java index 3242a02dff525..8ca62a3b95023 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java @@ -650,13 +650,14 @@ private RangeMissingHandler writerWithOffset(RangeMissingHandler writer, int wri // no need to allocate a new capturing lambda if the offset isn't adjusted return writer; } - return (channel, channelPos, streamFactory, relativePos, len, progressUpdater) -> writer.fillCacheRange( + return (channel, channelPos, streamFactory, relativePos, len, progressUpdater, completionListener) -> writer.fillCacheRange( channel, channelPos, streamFactory, relativePos - writeOffset, len, - progressUpdater + progressUpdater, + completionListener ); } @@ -991,16 +992,17 @@ void populateAndRead( executor.execute(fillGapRunnable(gap, writer, null, refs.acquireListener())); } } else { - final List gapFillingTasks = gaps.stream() - .map(gap -> fillGapRunnable(gap, writer, streamFactory, refs.acquireListener())) - .toList(); - executor.execute(() -> { - try (streamFactory) { + var gapFillingListener = refs.acquireListener(); + try (var gfRefs = new RefCountingRunnable(ActionRunnable.run(gapFillingListener, streamFactory::close))) { + final List gapFillingTasks = gaps.stream() + .map(gap -> fillGapRunnable(gap, writer, streamFactory, gfRefs.acquireListener())) + .toList(); + executor.execute(() -> { // Fill the gaps in order. 
If a gap fails to fill for whatever reason, the task for filling the next // gap will still be executed. gapFillingTasks.forEach(Runnable::run); - } - }); + }); + } } } } @@ -1009,13 +1011,13 @@ void populateAndRead( } } - private AbstractRunnable fillGapRunnable( + private Runnable fillGapRunnable( SparseFileTracker.Gap gap, RangeMissingHandler writer, @Nullable SourceInputStreamFactory streamFactory, ActionListener listener ) { - return ActionRunnable.run(listener.delegateResponse((l, e) -> failGapAndListener(gap, l, e)), () -> { + return () -> ActionListener.run(listener, l -> { var ioRef = io; assert regionOwners.get(ioRef) == CacheFileRegion.this; assert CacheFileRegion.this.hasReferences() : CacheFileRegion.this; @@ -1026,10 +1028,15 @@ private AbstractRunnable fillGapRunnable( streamFactory, start, Math.toIntExact(gap.end() - start), - progress -> gap.onProgress(start + progress) + progress -> gap.onProgress(start + progress), + l.map(unused -> { + assert regionOwners.get(ioRef) == CacheFileRegion.this; + assert CacheFileRegion.this.hasReferences() : CacheFileRegion.this; + writeCount.increment(); + gap.onCompletion(); + return null; + }).delegateResponse((delegate, e) -> failGapAndListener(gap, delegate, e)) ); - writeCount.increment(); - gap.onCompletion(); }); } @@ -1117,12 +1124,23 @@ public void fillCacheRange( SourceInputStreamFactory streamFactory, int relativePos, int length, - IntConsumer progressUpdater + IntConsumer progressUpdater, + ActionListener completionListener ) throws IOException { - writer.fillCacheRange(channel, channelPos, streamFactory, relativePos, length, progressUpdater); - var elapsedTime = TimeUnit.NANOSECONDS.toMillis(relativeTimeInNanosSupplier.getAsLong() - startTime); - SharedBlobCacheService.this.blobCacheMetrics.getCacheMissLoadTimes().record(elapsedTime); - SharedBlobCacheService.this.blobCacheMetrics.getCacheMissCounter().increment(); + writer.fillCacheRange( + channel, + channelPos, + streamFactory, + relativePos, + 
length, + progressUpdater, + completionListener.map(unused -> { + var elapsedTime = TimeUnit.NANOSECONDS.toMillis(relativeTimeInNanosSupplier.getAsLong() - startTime); + blobCacheMetrics.getCacheMissLoadTimes().record(elapsedTime); + blobCacheMetrics.getCacheMissCounter().increment(); + return null; + }) + ); } }; if (rangeToRead.isEmpty()) { @@ -1215,9 +1233,18 @@ public void fillCacheRange( SourceInputStreamFactory streamFactory, int relativePos, int len, - IntConsumer progressUpdater + IntConsumer progressUpdater, + ActionListener completionListener ) throws IOException { - delegate.fillCacheRange(channel, channelPos, streamFactory, relativePos - writeOffset, len, progressUpdater); + delegate.fillCacheRange( + channel, + channelPos, + streamFactory, + relativePos - writeOffset, + len, + progressUpdater, + completionListener + ); } }; } @@ -1230,14 +1257,25 @@ public void fillCacheRange( SourceInputStreamFactory streamFactory, int relativePos, int len, - IntConsumer progressUpdater + IntConsumer progressUpdater, + ActionListener completionListener ) throws IOException { assert assertValidRegionAndLength(fileRegion, channelPos, len); - delegate.fillCacheRange(channel, channelPos, streamFactory, relativePos, len, progressUpdater); - assert regionOwners.get(fileRegion.io) == fileRegion - : "File chunk [" + fileRegion.regionKey + "] no longer owns IO [" + fileRegion.io + "]"; + delegate.fillCacheRange( + channel, + channelPos, + streamFactory, + relativePos, + len, + progressUpdater, + Assertions.ENABLED ? 
ActionListener.runBefore(completionListener, () -> { + assert regionOwners.get(fileRegion.io) == fileRegion + : "File chunk [" + fileRegion.regionKey + "] no longer owns IO [" + fileRegion.io + "]"; + }) : completionListener + ); } }; + } return adjustedWriter; } @@ -1324,6 +1362,7 @@ default SourceInputStreamFactory sharedInputStreamFactory(List completionListener ) throws IOException; } @@ -1343,9 +1383,9 @@ public interface SourceInputStreamFactory extends Releasable { /** * Create the input stream at the specified position. * @param relativePos the relative position in the remote storage to read from. - * @return the input stream ready to be read from. + * @param listener listener for the input stream ready to be read from. */ - InputStream create(int relativePos) throws IOException; + void create(int relativePos, ActionListener listener) throws IOException; } private abstract static class DelegatingRangeMissingHandler implements RangeMissingHandler { @@ -1367,9 +1407,10 @@ public void fillCacheRange( SourceInputStreamFactory streamFactory, int relativePos, int length, - IntConsumer progressUpdater + IntConsumer progressUpdater, + ActionListener completionListener ) throws IOException { - delegate.fillCacheRange(channel, channelPos, streamFactory, relativePos, length, progressUpdater); + delegate.fillCacheRange(channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener); } } diff --git a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java index e477673c90d6d..6c49b50c06e82 100644 --- a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java +++ b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java @@ -29,6 +29,7 @@ import 
org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.StoppableExecutorServiceWrapper; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.core.CheckedRunnable; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.TestEnvironment; @@ -72,6 +73,13 @@ private static long size(long numPages) { return numPages * SharedBytes.PAGE_SIZE; } + private static void completeWith(ActionListener listener, CheckedRunnable runnable) { + ActionListener.completeWith(listener, () -> { + runnable.run(); + return null; + }); + } + public void testBasicEviction() throws IOException { Settings settings = Settings.builder() .put(NODE_NAME_SETTING.getKey(), "node") @@ -115,7 +123,10 @@ public void testBasicEviction() throws IOException { ByteRange.of(0L, 1L), ByteRange.of(0L, 1L), (channel, channelPos, relativePos, length) -> 1, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> progressUpdater.accept(length), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> progressUpdater.accept(length) + ), taskQueue.getThreadPool().generic(), bytesReadFuture ); @@ -552,11 +563,14 @@ public void execute(Runnable command) { cacheService.maybeFetchFullEntry( cacheKey, size, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { - assert streamFactory == null : streamFactory; - bytesRead.addAndGet(-length); - progressUpdater.accept(length); - }, + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + assert streamFactory == null : streamFactory; + bytesRead.addAndGet(-length); + progressUpdater.accept(length); + } + ), bulkExecutor, future ); @@ -570,9 +584,15 @@ public void execute(Runnable command) { // a download that would use up all 
regions should not run final var cacheKey = generateCacheKey(); assertEquals(2, cacheService.freeRegionCount()); - var configured = cacheService.maybeFetchFullEntry(cacheKey, size(500), (ch, chPos, streamFactory, relPos, len, update) -> { - throw new AssertionError("Should never reach here"); - }, bulkExecutor, ActionListener.noop()); + var configured = cacheService.maybeFetchFullEntry( + cacheKey, + size(500), + (ch, chPos, streamFactory, relPos, len, update, completionListener) -> completeWith(completionListener, () -> { + throw new AssertionError("Should never reach here"); + }), + bulkExecutor, + ActionListener.noop() + ); assertFalse(configured); assertEquals(2, cacheService.freeRegionCount()); } @@ -613,9 +633,14 @@ public void testFetchFullCacheEntryConcurrently() throws Exception { (ActionListener listener) -> cacheService.maybeFetchFullEntry( cacheKey, size, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> progressUpdater.accept( - length - ), + ( + channel, + channelPos, + streamFactory, + relativePos, + length, + progressUpdater, + completionListener) -> completeWith(completionListener, () -> progressUpdater.accept(length)), bulkExecutor, listener ) @@ -859,7 +884,10 @@ public void testMaybeEvictLeastUsed() throws Exception { var entry = cacheService.get(cacheKey, regionSize, 0); entry.populate( ByteRange.of(0L, regionSize), - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> progressUpdater.accept(length), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> progressUpdater.accept(length) + ), taskQueue.getThreadPool().generic(), ActionListener.noop() ); @@ -954,11 +982,14 @@ public void execute(Runnable command) { cacheKey, 0, blobLength, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { - assert streamFactory == null : streamFactory; - bytesRead.addAndGet(length); - 
progressUpdater.accept(length); - }, + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + assert streamFactory == null : streamFactory; + bytesRead.addAndGet(length); + progressUpdater.accept(length); + } + ), bulkExecutor, future ); @@ -985,11 +1016,14 @@ public void execute(Runnable command) { cacheKey, region, blobLength, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { - assert streamFactory == null : streamFactory; - bytesRead.addAndGet(length); - progressUpdater.accept(length); - }, + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + assert streamFactory == null : streamFactory; + bytesRead.addAndGet(length); + progressUpdater.accept(length); + } + ), bulkExecutor, listener ); @@ -1010,9 +1044,12 @@ public void execute(Runnable command) { cacheKey, randomIntBetween(0, 10), randomLongBetween(1L, regionSize), - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { - throw new AssertionError("should not be executed"); - }, + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + throw new AssertionError("should not be executed"); + } + ), bulkExecutor, future ); @@ -1032,11 +1069,14 @@ public void execute(Runnable command) { cacheKey, 0, blobLength, - (channel, channelPos, ignore, relativePos, length, progressUpdater) -> { - assert ignore == null : ignore; - bytesRead.addAndGet(length); - progressUpdater.accept(length); - }, + (channel, channelPos, ignore, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + assert ignore == null : ignore; + bytesRead.addAndGet(length); + progressUpdater.accept(length); + } + ), bulkExecutor, future ); @@ -1110,12 +1150,15 @@ public void 
execute(Runnable command) { region, range, blobLength, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { - assertThat(range.start() + relativePos, equalTo(cacheService.getRegionStart(region) + regionRange.start())); - assertThat(channelPos, equalTo(Math.toIntExact(regionRange.start()))); - assertThat(length, equalTo(Math.toIntExact(regionRange.length()))); - bytesCopied.addAndGet(length); - }, + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + assertThat(range.start() + relativePos, equalTo(cacheService.getRegionStart(region) + regionRange.start())); + assertThat(channelPos, equalTo(Math.toIntExact(regionRange.start()))); + assertThat(length, equalTo(Math.toIntExact(regionRange.length()))); + bytesCopied.addAndGet(length); + } + ), bulkExecutor, future ); @@ -1150,7 +1193,10 @@ public void execute(Runnable command) { region, ByteRange.of(0L, blobLength), blobLength, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> bytesCopied.addAndGet(length), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> bytesCopied.addAndGet(length) + ), bulkExecutor, listener ); @@ -1173,9 +1219,12 @@ public void execute(Runnable command) { randomIntBetween(0, 10), ByteRange.of(0L, blobLength), blobLength, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { - throw new AssertionError("should not be executed"); - }, + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + throw new AssertionError("should not be executed"); + } + ), bulkExecutor, future ); @@ -1196,7 +1245,10 @@ public void execute(Runnable command) { 0, ByteRange.of(0L, blobLength), blobLength, - (channel, channelPos, streamFactory, relativePos, length, 
progressUpdater) -> bytesCopied.addAndGet(length), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> bytesCopied.addAndGet(length) + ), bulkExecutor, future ); @@ -1237,10 +1289,18 @@ public void testPopulate() throws Exception { var entry = cacheService.get(cacheKey, blobLength, 0); AtomicLong bytesWritten = new AtomicLong(0L); final PlainActionFuture future1 = new PlainActionFuture<>(); - entry.populate(ByteRange.of(0, regionSize - 1), (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { - bytesWritten.addAndGet(length); - progressUpdater.accept(length); - }, taskQueue.getThreadPool().generic(), future1); + entry.populate( + ByteRange.of(0, regionSize - 1), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + bytesWritten.addAndGet(length); + progressUpdater.accept(length); + } + ), + taskQueue.getThreadPool().generic(), + future1 + ); assertThat(future1.isDone(), is(false)); assertThat(taskQueue.hasRunnableTasks(), is(true)); @@ -1248,18 +1308,34 @@ public void testPopulate() throws Exception { // start populating the second region entry = cacheService.get(cacheKey, blobLength, 1); final PlainActionFuture future2 = new PlainActionFuture<>(); - entry.populate(ByteRange.of(0, regionSize - 1), (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { - bytesWritten.addAndGet(length); - progressUpdater.accept(length); - }, taskQueue.getThreadPool().generic(), future2); + entry.populate( + ByteRange.of(0, regionSize - 1), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + bytesWritten.addAndGet(length); + progressUpdater.accept(length); + } + ), + taskQueue.getThreadPool().generic(), + future2 + ); // start populating again the first 
region, listener should be called immediately entry = cacheService.get(cacheKey, blobLength, 0); final PlainActionFuture future3 = new PlainActionFuture<>(); - entry.populate(ByteRange.of(0, regionSize - 1), (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> { - bytesWritten.addAndGet(length); - progressUpdater.accept(length); - }, taskQueue.getThreadPool().generic(), future3); + entry.populate( + ByteRange.of(0, regionSize - 1), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> { + bytesWritten.addAndGet(length); + progressUpdater.accept(length); + } + ), + taskQueue.getThreadPool().generic(), + future3 + ); assertThat(future3.isDone(), is(true)); var written = future3.get(10L, TimeUnit.SECONDS); @@ -1377,7 +1453,10 @@ public void testSharedSourceInputStreamFactory() throws Exception { range, range, (channel, channelPos, relativePos, length) -> length, - (channel, channelPos, streamFactory, relativePos, length, progressUpdater) -> progressUpdater.accept(length), + (channel, channelPos, streamFactory, relativePos, length, progressUpdater, completionListener) -> completeWith( + completionListener, + () -> progressUpdater.accept(length) + ), EsExecutors.DIRECT_EXECUTOR_SERVICE, future ); @@ -1394,8 +1473,8 @@ public void testSharedSourceInputStreamFactory() throws Exception { final var factoryClosed = new AtomicBoolean(false); final var dummyStreamFactory = new SourceInputStreamFactory() { @Override - public InputStream create(int relativePos) { - return null; + public void create(int relativePos, ActionListener listener) { + listener.onResponse(null); } @Override @@ -1420,17 +1499,20 @@ public void fillCacheRange( SourceInputStreamFactory streamFactory, int relativePos, int length, - IntConsumer progressUpdater + IntConsumer progressUpdater, + ActionListener completion ) throws IOException { - if (invocationCounter.incrementAndGet() == 1) { - 
final Thread witness = invocationThread.compareAndExchange(null, Thread.currentThread()); - assertThat(witness, nullValue()); - } else { - assertThat(invocationThread.get(), sameInstance(Thread.currentThread())); - } - assertThat(streamFactory, sameInstance(dummyStreamFactory)); - assertThat(position.getAndSet(relativePos), lessThan(relativePos)); - progressUpdater.accept(length); + completeWith(completion, () -> { + if (invocationCounter.incrementAndGet() == 1) { + final Thread witness = invocationThread.compareAndExchange(null, Thread.currentThread()); + assertThat(witness, nullValue()); + } else { + assertThat(invocationThread.get(), sameInstance(Thread.currentThread())); + } + assertThat(streamFactory, sameInstance(dummyStreamFactory)); + assertThat(position.getAndSet(relativePos), lessThan(relativePos)); + progressUpdater.accept(length); + }); } }; diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInput.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInput.java index 56efc72f2f6f7..d7cf22a05981f 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInput.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInput.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.blobcache.BlobCacheUtils; import org.elasticsearch.blobcache.common.ByteBufferReference; import org.elasticsearch.blobcache.common.ByteRange; @@ -146,32 +147,38 @@ private void readWithoutBlobCacheSlow(ByteBuffer b, long position, int length) t final int read = SharedBytes.readCacheFile(channel, pos, relativePos, len, 
byteBufferReference); stats.addCachedBytesRead(read); return read; - }, (channel, channelPos, streamFactory, relativePos, len, progressUpdater) -> { - assert streamFactory == null : streamFactory; - final long startTimeNanos = stats.currentTimeNanos(); - try (InputStream input = openInputStreamFromBlobStore(rangeToWrite.start() + relativePos, len)) { - assert ThreadPool.assertCurrentThreadPool(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME); - logger.trace( - "{}: writing channel {} pos {} length {} (details: {})", - fileInfo.physicalName(), - channelPos, - relativePos, - len, - cacheFile - ); - SharedBytes.copyToCacheFileAligned( - channel, - input, - channelPos, - relativePos, - len, - progressUpdater, - writeBuffer.get().clear() - ); - final long endTimeNanos = stats.currentTimeNanos(); - stats.addCachedBytesWritten(len, endTimeNanos - startTimeNanos); - } - }); + }, + (channel, channelPos, streamFactory, relativePos, len, progressUpdater, completionListener) -> ActionListener.completeWith( + completionListener, + () -> { + assert streamFactory == null : streamFactory; + final long startTimeNanos = stats.currentTimeNanos(); + try (InputStream input = openInputStreamFromBlobStore(rangeToWrite.start() + relativePos, len)) { + assert ThreadPool.assertCurrentThreadPool(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME); + logger.trace( + "{}: writing channel {} pos {} length {} (details: {})", + fileInfo.physicalName(), + channelPos, + relativePos, + len, + cacheFile + ); + SharedBytes.copyToCacheFileAligned( + channel, + input, + channelPos, + relativePos, + len, + progressUpdater, + writeBuffer.get().clear() + ); + final long endTimeNanos = stats.currentTimeNanos(); + stats.addCachedBytesWritten(len, endTimeNanos - startTimeNanos); + return null; + } + } + ) + ); assert bytesRead == length : bytesRead + " vs " + length; byteBufferReference.finish(bytesRead); } finally { From cf034c03df532ef353ff5d09f3cccbf109af53d6 Mon Sep 17 00:00:00 2001 From: 
Kathleen DeRusso Date: Mon, 19 Aug 2024 10:55:34 -0400 Subject: [PATCH 05/20] Add a new random rerank retriever (#111851) * Add a new random rerank retriever, that reranks results in random order without requiring inference * Update docs/changelog/111851.yaml * PR feedback - remove null checks for field as it can never be null * Update docs * Revert "Update docs" This reverts commit 3d61676e8c9ab76472f824554efd607ddd1c5678. * Remove minScore * Random seed * Delete docs/changelog/111851.yaml * PR feedback * Add optional seed to request, YAML test * PR feedback --- .../org/elasticsearch/TransportVersions.java | 1 + .../xpack/inference/InferenceFeatures.java | 6 +- .../xpack/inference/InferencePlugin.java | 6 +- .../rank/random/RandomRankBuilder.java | 165 ++++++++++++++++++ ...ankFeaturePhaseRankCoordinatorContext.java | 55 ++++++ .../random/RandomRankRetrieverBuilder.java | 124 +++++++++++++ .../rank/random/RandomRankBuilderTests.java | 70 ++++++++ .../RandomRankRetrieverBuilderTests.java | 104 +++++++++++ .../inference/80_random_rerank_retriever.yml | 94 ++++++++++ 9 files changed, 623 insertions(+), 2 deletions(-) create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankBuilder.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankFeaturePhaseRankCoordinatorContext.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilder.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/random/RandomRankBuilderTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilderTests.java create mode 100644 x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/80_random_rerank_retriever.yml diff --git 
a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index fd3a3d8672966..1009d9e2ae7d1 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -191,6 +191,7 @@ static TransportVersion def(int id) { public static final TransportVersion INGEST_PIPELINE_EXCEPTION_ADDED = def(8_721_00_0); public static final TransportVersion ZDT_NANOS_SUPPORT = def(8_722_00_0); public static final TransportVersion REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES = def(8_723_00_0); + public static final TransportVersion RANDOM_RERANKER_RETRIEVER = def(8_724_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java index 4cc7f5b502ba9..12a32ecdc6d4f 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java @@ -9,6 +9,7 @@ import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.xpack.inference.rank.random.RandomRankRetrieverBuilder; import org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankRetrieverBuilder; import java.util.Set; @@ -20,7 +21,10 @@ public class InferenceFeatures implements FeatureSpecification { @Override public Set getFeatures() { - return Set.of(TextSimilarityRankRetrieverBuilder.TEXT_SIMILARITY_RERANKER_RETRIEVER_SUPPORTED); + return Set.of( + TextSimilarityRankRetrieverBuilder.TEXT_SIMILARITY_RERANKER_RETRIEVER_SUPPORTED, + RandomRankRetrieverBuilder.RANDOM_RERANKER_RETRIEVER_SUPPORTED + ); } } diff --git 
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java index f6d4a9f774a91..9d85bbf751250 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java @@ -63,6 +63,8 @@ import org.elasticsearch.xpack.inference.logging.ThrottlerManager; import org.elasticsearch.xpack.inference.mapper.SemanticTextFieldMapper; import org.elasticsearch.xpack.inference.queries.SemanticQueryBuilder; +import org.elasticsearch.xpack.inference.rank.random.RandomRankBuilder; +import org.elasticsearch.xpack.inference.rank.random.RandomRankRetrieverBuilder; import org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankBuilder; import org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankRetrieverBuilder; import org.elasticsearch.xpack.inference.registry.ModelRegistry; @@ -243,6 +245,7 @@ public List getInferenceServiceFactories() { public List getNamedWriteables() { var entries = new ArrayList<>(InferenceNamedWriteablesProvider.getNamedWriteables()); entries.add(new NamedWriteableRegistry.Entry(RankBuilder.class, TextSimilarityRankBuilder.NAME, TextSimilarityRankBuilder::new)); + entries.add(new NamedWriteableRegistry.Entry(RankBuilder.class, RandomRankBuilder.NAME, RandomRankBuilder::new)); return entries; } @@ -336,7 +339,8 @@ public List> getQueries() { @Override public List> getRetrievers() { return List.of( - new RetrieverSpec<>(new ParseField(TextSimilarityRankBuilder.NAME), TextSimilarityRankRetrieverBuilder::fromXContent) + new RetrieverSpec<>(new ParseField(TextSimilarityRankBuilder.NAME), TextSimilarityRankRetrieverBuilder::fromXContent), + new RetrieverSpec<>(new ParseField(RandomRankBuilder.NAME), RandomRankRetrieverBuilder::fromXContent) ); } } diff --git 
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankBuilder.java new file mode 100644 index 0000000000000..fdb5503e491eb --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankBuilder.java @@ -0,0 +1,165 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.rank.random; + +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.Query; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.rank.RankBuilder; +import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.search.rank.context.QueryPhaseRankCoordinatorContext; +import org.elasticsearch.search.rank.context.QueryPhaseRankShardContext; +import org.elasticsearch.search.rank.context.RankFeaturePhaseRankCoordinatorContext; +import org.elasticsearch.search.rank.context.RankFeaturePhaseRankShardContext; +import org.elasticsearch.search.rank.feature.RankFeatureDoc; +import org.elasticsearch.search.rank.rerank.RerankingQueryPhaseRankCoordinatorContext; +import org.elasticsearch.search.rank.rerank.RerankingQueryPhaseRankShardContext; +import org.elasticsearch.search.rank.rerank.RerankingRankFeaturePhaseRankShardContext; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; +import 
java.util.Objects; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static org.elasticsearch.xpack.inference.rank.random.RandomRankRetrieverBuilder.FIELD_FIELD; +import static org.elasticsearch.xpack.inference.rank.random.RandomRankRetrieverBuilder.SEED_FIELD; + +/** + * A {@code RankBuilder} that performs reranking with random scores, used for testing. + */ +public class RandomRankBuilder extends RankBuilder { + + public static final String NAME = "random_reranker"; + + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, args -> { + Integer rankWindowSize = args[0] == null ? DEFAULT_RANK_WINDOW_SIZE : (Integer) args[0]; + String field = (String) args[1]; + Integer seed = (Integer) args[2]; + + return new RandomRankBuilder(rankWindowSize, field, seed); + }); + + static { + PARSER.declareInt(optionalConstructorArg(), RANK_WINDOW_SIZE_FIELD); + PARSER.declareString(constructorArg(), FIELD_FIELD); + PARSER.declareInt(optionalConstructorArg(), SEED_FIELD); + } + + private final String field; + private final Integer seed; + + public RandomRankBuilder(int rankWindowSize, String field, Integer seed) { + super(rankWindowSize); + + if (field == null || field.isEmpty()) { + throw new IllegalArgumentException("field is required"); + } + + this.field = field; + this.seed = seed; + } + + public RandomRankBuilder(StreamInput in) throws IOException { + super(in); + // rankWindowSize deserialization is handled by the parent class RankBuilder + this.field = in.readString(); + this.seed = in.readOptionalInt(); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.RANDOM_RERANKER_RETRIEVER; + } + + @Override + public void doWriteTo(StreamOutput out) throws IOException { + // rankWindowSize serialization 
is handled by the parent class RankBuilder + out.writeString(field); + out.writeOptionalInt(seed); + } + + @Override + public void doXContent(XContentBuilder builder, Params params) throws IOException { + // rankWindowSize serialization is handled by the parent class RankBuilder + builder.field(FIELD_FIELD.getPreferredName(), field); + if (seed != null) { + builder.field(SEED_FIELD.getPreferredName(), seed); + } + } + + @Override + public boolean isCompoundBuilder() { + return false; + } + + @Override + public Explanation explainHit(Explanation baseExplanation, RankDoc scoreDoc, List queryNames) { + if (scoreDoc == null) { + return baseExplanation; + } + if (false == baseExplanation.isMatch()) { + return baseExplanation; + } + + assert scoreDoc instanceof RankFeatureDoc : "ScoreDoc is not an instance of RankFeatureDoc"; + RankFeatureDoc rankFeatureDoc = (RankFeatureDoc) scoreDoc; + + return Explanation.match( + rankFeatureDoc.score, + "rank after reranking: [" + rankFeatureDoc.rank + "] using seed [" + seed + "] with score: [" + rankFeatureDoc.score + "]", + baseExplanation + ); + } + + @Override + public QueryPhaseRankShardContext buildQueryPhaseShardContext(List queries, int from) { + return new RerankingQueryPhaseRankShardContext(queries, rankWindowSize()); + } + + @Override + public QueryPhaseRankCoordinatorContext buildQueryPhaseCoordinatorContext(int size, int from) { + return new RerankingQueryPhaseRankCoordinatorContext(rankWindowSize()); + } + + @Override + public RankFeaturePhaseRankShardContext buildRankFeaturePhaseShardContext() { + return new RerankingRankFeaturePhaseRankShardContext(field); + } + + @Override + public RankFeaturePhaseRankCoordinatorContext buildRankFeaturePhaseCoordinatorContext(int size, int from, Client client) { + return new RandomRankFeaturePhaseRankCoordinatorContext(size, from, rankWindowSize(), seed); + } + + public String field() { + return field; + } + + @Override + protected boolean doEquals(RankBuilder other) { + 
RandomRankBuilder that = (RandomRankBuilder) other; + return Objects.equals(field, that.field) && Objects.equals(seed, that.seed); + } + + @Override + protected int doHashCode() { + return Objects.hash(field, seed); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankFeaturePhaseRankCoordinatorContext.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankFeaturePhaseRankCoordinatorContext.java new file mode 100644 index 0000000000000..446d8e5862dd2 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankFeaturePhaseRankCoordinatorContext.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.rank.random; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.search.rank.context.RankFeaturePhaseRankCoordinatorContext; +import org.elasticsearch.search.rank.feature.RankFeatureDoc; + +import java.util.Arrays; +import java.util.Comparator; +import java.util.Random; + +/** + * A {@code RankFeaturePhaseRankCoordinatorContext} that performs a rerank inference call to determine relevance scores for documents within + * the provided rank window. 
+ */ +public class RandomRankFeaturePhaseRankCoordinatorContext extends RankFeaturePhaseRankCoordinatorContext { + + private final Integer seed; + + public RandomRankFeaturePhaseRankCoordinatorContext(int size, int from, int rankWindowSize, Integer seed) { + super(size, from, rankWindowSize); + this.seed = seed; + } + + @Override + protected void computeScores(RankFeatureDoc[] featureDocs, ActionListener scoreListener) { + // Generate random scores seeded by doc + float[] scores = new float[featureDocs.length]; + for (int i = 0; i < featureDocs.length; i++) { + RankFeatureDoc featureDoc = featureDocs[i]; + int doc = featureDoc.doc; + long docSeed = seed != null ? seed + doc : doc; + scores[i] = new Random(docSeed).nextFloat(); + } + scoreListener.onResponse(scores); + } + + /** + * Sorts documents by score descending. + * @param originalDocs documents to process + */ + @Override + protected RankFeatureDoc[] preprocess(RankFeatureDoc[] originalDocs) { + return Arrays.stream(originalDocs) + .sorted(Comparator.comparing((RankFeatureDoc doc) -> doc.score).reversed()) + .toArray(RankFeatureDoc[]::new); + } + +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilder.java new file mode 100644 index 0000000000000..ab8c85cac00e3 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilder.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.rank.random; + +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.retriever.RetrieverBuilder; +import org.elasticsearch.search.retriever.RetrieverParserContext; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.search.rank.RankBuilder.DEFAULT_RANK_WINDOW_SIZE; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * A {@code RetrieverBuilder} for parsing and constructing a text similarity reranker retriever. + */ +public class RandomRankRetrieverBuilder extends RetrieverBuilder { + + public static final NodeFeature RANDOM_RERANKER_RETRIEVER_SUPPORTED = new NodeFeature("random_reranker_retriever_supported"); + + public static final ParseField RETRIEVER_FIELD = new ParseField("retriever"); + public static final ParseField FIELD_FIELD = new ParseField("field"); + public static final ParseField RANK_WINDOW_SIZE_FIELD = new ParseField("rank_window_size"); + public static final ParseField SEED_FIELD = new ParseField("seed"); + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>(RandomRankBuilder.NAME, args -> { + RetrieverBuilder retrieverBuilder = (RetrieverBuilder) args[0]; + String field = (String) args[1]; + int rankWindowSize = args[2] == null ? 
DEFAULT_RANK_WINDOW_SIZE : (int) args[2]; + Integer seed = (Integer) args[3]; + + return new RandomRankRetrieverBuilder(retrieverBuilder, field, rankWindowSize, seed); + }); + + static { + PARSER.declareNamedObject(constructorArg(), (p, c, n) -> p.namedObject(RetrieverBuilder.class, n, c), RETRIEVER_FIELD); + PARSER.declareString(optionalConstructorArg(), FIELD_FIELD); + PARSER.declareInt(optionalConstructorArg(), RANK_WINDOW_SIZE_FIELD); + PARSER.declareInt(optionalConstructorArg(), SEED_FIELD); + + RetrieverBuilder.declareBaseParserFields(RandomRankBuilder.NAME, PARSER); + } + + public static RandomRankRetrieverBuilder fromXContent(XContentParser parser, RetrieverParserContext context) throws IOException { + if (context.clusterSupportsFeature(RANDOM_RERANKER_RETRIEVER_SUPPORTED) == false) { + throw new ParsingException(parser.getTokenLocation(), "unknown retriever [" + RandomRankBuilder.NAME + "]"); + } + return PARSER.apply(parser, context); + } + + private final RetrieverBuilder retrieverBuilder; + private final String field; + private final int rankWindowSize; + private final Integer seed; + + public RandomRankRetrieverBuilder(RetrieverBuilder retrieverBuilder, String field, int rankWindowSize, Integer seed) { + this.retrieverBuilder = retrieverBuilder; + this.field = field; + this.rankWindowSize = rankWindowSize; + this.seed = seed; + } + + @Override + public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { + retrieverBuilder.extractToSearchSourceBuilder(searchSourceBuilder, compoundUsed); + + // Combining with other rank builder (such as RRF) is not supported + if (searchSourceBuilder.rankBuilder() != null) { + throw new IllegalArgumentException("random rank builder cannot be combined with other rank builders"); + } + + searchSourceBuilder.rankBuilder(new RandomRankBuilder(this.rankWindowSize, this.field, this.seed)); + } + + @Override + public String getName() { + return RandomRankBuilder.NAME; + } + + 
public int rankWindowSize() { + return rankWindowSize; + } + + @Override + protected void doToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(RETRIEVER_FIELD.getPreferredName()); + builder.startObject(); + builder.field(retrieverBuilder.getName(), retrieverBuilder); + builder.endObject(); + builder.field(FIELD_FIELD.getPreferredName(), field); + builder.field(RANK_WINDOW_SIZE_FIELD.getPreferredName(), rankWindowSize); + if (seed != null) { + builder.field(SEED_FIELD.getPreferredName(), seed); + } + } + + @Override + protected boolean doEquals(Object other) { + RandomRankRetrieverBuilder that = (RandomRankRetrieverBuilder) other; + return Objects.equals(retrieverBuilder, that.retrieverBuilder) + && Objects.equals(field, that.field) + && Objects.equals(rankWindowSize, that.rankWindowSize) + && Objects.equals(seed, that.seed); + } + + @Override + protected int doHashCode() { + return Objects.hash(retrieverBuilder, field, rankWindowSize, seed); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/random/RandomRankBuilderTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/random/RandomRankBuilderTests.java new file mode 100644 index 0000000000000..c464dbaea47cd --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/random/RandomRankBuilderTests.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.rank.random; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractXContentSerializingTestCase; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.IOException; + +import static org.elasticsearch.search.rank.RankBuilder.DEFAULT_RANK_WINDOW_SIZE; + +public class RandomRankBuilderTests extends AbstractXContentSerializingTestCase { + + @Override + protected RandomRankBuilder createTestInstance() { + return new RandomRankBuilder(randomIntBetween(1, 1000), "my-field", randomBoolean() ? randomIntBetween(1, 1000) : null); + } + + @Override + protected RandomRankBuilder mutateInstance(RandomRankBuilder instance) throws IOException { + String field = instance.field() + randomAlphaOfLength(2); + int rankWindowSize = randomValueOtherThan(instance.rankWindowSize(), this::randomRankWindowSize); + Integer seed = randomBoolean() ? randomIntBetween(1, 1000) : null; + return new RandomRankBuilder(rankWindowSize, field, seed); + } + + @Override + protected Writeable.Reader instanceReader() { + return RandomRankBuilder::new; + } + + @Override + protected RandomRankBuilder doParseInstance(XContentParser parser) throws IOException { + parser.nextToken(); + assertEquals(parser.currentToken(), XContentParser.Token.START_OBJECT); + parser.nextToken(); + assertEquals(parser.currentToken(), XContentParser.Token.FIELD_NAME); + assertEquals(parser.currentName(), RandomRankBuilder.NAME); + RandomRankBuilder builder = RandomRankBuilder.PARSER.parse(parser, null); + parser.nextToken(); + assertEquals(parser.currentToken(), XContentParser.Token.END_OBJECT); + parser.nextToken(); + assertNull(parser.currentToken()); + return builder; + } + + private int randomRankWindowSize() { + return randomIntBetween(0, 1000); + } + + public void testParserDefaults() throws IOException { + String json = """ + { + "field": "my-field" + }"""; + + try 
(XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + RandomRankBuilder parsed = RandomRankBuilder.PARSER.parse(parser, null); + assertEquals(DEFAULT_RANK_WINDOW_SIZE, parsed.rankWindowSize()); + } + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilderTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilderTests.java new file mode 100644 index 0000000000000..c33f30d461350 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/random/RandomRankRetrieverBuilderTests.java @@ -0,0 +1,104 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.rank.random; + +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.search.retriever.RetrieverBuilder; +import org.elasticsearch.search.retriever.RetrieverParserContext; +import org.elasticsearch.search.retriever.TestRetrieverBuilder; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.usage.SearchUsage; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankBuilder; +import org.elasticsearch.xpack.inference.rank.textsimilarity.TextSimilarityRankRetrieverBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.search.rank.RankBuilder.DEFAULT_RANK_WINDOW_SIZE; + +public class RandomRankRetrieverBuilderTests extends 
AbstractXContentTestCase { + + /** + * Creates a random {@link RandomRankRetrieverBuilder}. The created instance + * is not guaranteed to pass {@link SearchRequest} validation. This is purely + * for x-content testing. + */ + public static RandomRankRetrieverBuilder createRandomRankRetrieverBuilder() { + return new RandomRankRetrieverBuilder( + TestRetrieverBuilder.createRandomTestRetrieverBuilder(), + randomAlphaOfLength(10), + randomIntBetween(1, 10000), + randomBoolean() ? randomIntBetween(1, 1000) : null + ); + } + + @Override + protected RandomRankRetrieverBuilder createTestInstance() { + return createRandomRankRetrieverBuilder(); + } + + @Override + protected RandomRankRetrieverBuilder doParseInstance(XContentParser parser) { + return RandomRankRetrieverBuilder.PARSER.apply( + parser, + new RetrieverParserContext( + new SearchUsage(), + nf -> nf == RetrieverBuilder.RETRIEVERS_SUPPORTED || nf == RandomRankRetrieverBuilder.RANDOM_RERANKER_RETRIEVER_SUPPORTED + ) + ); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List entries = new ArrayList<>(); + entries.add( + new NamedXContentRegistry.Entry( + RetrieverBuilder.class, + TestRetrieverBuilder.TEST_SPEC.getName(), + (p, c) -> TestRetrieverBuilder.TEST_SPEC.getParser().fromXContent(p, (RetrieverParserContext) c), + TestRetrieverBuilder.TEST_SPEC.getName().getForRestApiVersion() + ) + ); + entries.add( + new NamedXContentRegistry.Entry( + RetrieverBuilder.class, + new ParseField(TextSimilarityRankBuilder.NAME), + (p, c) -> TextSimilarityRankRetrieverBuilder.PARSER.apply(p, (RetrieverParserContext) c) + ) + ); + return new NamedXContentRegistry(entries); + } + + public void testParserDefaults() throws IOException { + String json = """ + { + "retriever": { + "test": { + "value": "my-test-retriever" + } + }, + "field": "my-field" + }"""; + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, 
json)) { + RandomRankRetrieverBuilder parsed = RandomRankRetrieverBuilder.PARSER.parse(parser, null); + assertEquals(DEFAULT_RANK_WINDOW_SIZE, parsed.rankWindowSize()); + } + } + +} diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/80_random_rerank_retriever.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/80_random_rerank_retriever.yml new file mode 100644 index 0000000000000..d33f57f763db8 --- /dev/null +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/80_random_rerank_retriever.yml @@ -0,0 +1,94 @@ +setup: + - requires: + cluster_features: "gte_v8.16.0" + reason: random rerank retriever introduced in 8.16.0 + test_runner_features: "close_to" + + - do: + indices.create: + index: test-index + body: + mappings: + properties: + text: + type: text + topic: + type: keyword + subtopic: + type: keyword + + - do: + bulk: + refresh: true + index: test-index + body: | + {"index": { "_id": "doc_1" } } + { "text": "Pugs are proof that even nature has a sense of humor." } + {"index": { "_id": "doc_2" } } + { "text": "A pugs snore can rival a chainsaw, but it's somehow adorable." } + {"index": { "_id": "doc_3" } } + { "text": "Pugs are like potato chips; you can't have just one wrinkle." } + {"index": { "_id": "doc_4" } } + { "text": "Pugs don't walk; pugs waddle majestically." } + {"index": { "_id": "doc_5" } } + { "text": "A pugs life goal: be the ultimate couch potato, and they're crushing it." 
} +--- +"Random rerank retriever predictably shuffles results": + + - do: + search: + index: test-index + body: + query: + query_string: + query: "pugs" + size: 10 + + - match: { hits.total.value: 5 } + - length: { hits.hits: 5 } + + - match: { hits.hits.0._id: "doc_4" } + - close_to: { hits.hits.0._score: { value: 0.136, error: 0.001 } } + + - do: + search: + index: test-index + body: + retriever: + random_reranker: + retriever: + standard: + query: + query_string: + query: "pugs" + field: text + seed: 42 + rank_window_size: 10 + size: 10 + + - match: { hits.total.value: 5 } + - length: { hits.hits: 5 } + + - match: { hits.hits.0._id: "doc_1" } + - close_to: { hits.hits.0._score: { value: 0.727, error: 0.001 } } + + - do: + search: + index: test-index + body: + retriever: + random_reranker: + retriever: + standard: + query: + query_string: + query: "pugs" + field: text + rank_window_size: 10 + size: 10 + + - match: { hits.total.value: 5 } + - length: { hits.hits: 5 } + + - match: { hits.hits.0._id: "doc_3" } + - close_to: { hits.hits.0._score: { value: 0.731, error: 0.001 } } From ba8590ba13b898909eb2418671478d9e9643e09d Mon Sep 17 00:00:00 2001 From: Quentin Pradet Date: Mon, 19 Aug 2024 18:57:39 +0400 Subject: [PATCH 06/20] Add analysis-common YAML tests to rest-resources-zip (#111974) --- modules/analysis-common/build.gradle | 3 +++ .../test/indices.analyze/{10_analyze.yml => 15_analyze.yml} | 0 x-pack/rest-resources-zip/build.gradle | 1 + 3 files changed, 4 insertions(+) rename modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices.analyze/{10_analyze.yml => 15_analyze.yml} (100%) diff --git a/modules/analysis-common/build.gradle b/modules/analysis-common/build.gradle index 77fd095806d10..1fc42a1b294fe 100644 --- a/modules/analysis-common/build.gradle +++ b/modules/analysis-common/build.gradle @@ -36,3 +36,6 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task -> task.skipTest("search.query/50_queries_with_synonyms/Test 
common terms query with stacked tokens", "#42654 - `common` query throws an exception") } +artifacts { + restTests(new File(projectDir, "src/yamlRestTest/resources/rest-api-spec/test")) +} diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices.analyze/10_analyze.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices.analyze/15_analyze.yml similarity index 100% rename from modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices.analyze/10_analyze.yml rename to modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/indices.analyze/15_analyze.yml diff --git a/x-pack/rest-resources-zip/build.gradle b/x-pack/rest-resources-zip/build.gradle index 3d0533b4ec57e..cc5bddf12d801 100644 --- a/x-pack/rest-resources-zip/build.gradle +++ b/x-pack/rest-resources-zip/build.gradle @@ -20,6 +20,7 @@ dependencies { apis project(path: ':rest-api-spec', configuration: 'restSpecs') freeTests project(path: ':rest-api-spec', configuration: 'restTests') freeTests project(path: ':modules:aggregations', configuration: 'restTests') + freeTests project(path: ':modules:analysis-common', configuration: 'restTests') compatApis project(path: ':rest-api-spec', configuration: 'restCompatSpecs') compatApis project(path: ':x-pack:plugin', configuration: 'restCompatSpecs') freeCompatTests project(path: ':rest-api-spec', configuration: 'restCompatTests') From aa959e69cc507a16f7f725240db2e7453c0a8320 Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Mon, 19 Aug 2024 17:18:15 +0200 Subject: [PATCH 07/20] ES|QL: shorten error messages for UnsupportedAttributes (#111973) When dealing with index patterns, eg. `FROM logs-*`, some fields can have the same name but different types in different indices. 
In this case we build an error message like ``` Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types: [ip] in [test1, test2], [keyword] in [test3]" ``` With this PR, in case of many indices involved, we avoid listing them all, but we only list three of them and provide information about how many other indices are affected, eg. ``` Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types: [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6] ``` (see the `and [2] other indices`) Since these error messages are stored in `UnspportedAttributes` and serialized, this PR reduces significantly the size of a serialized execution plan with many type conflicts. Fixes https://github.com/elastic/elasticsearch/issues/111964 Related to https://github.com/elastic/elasticsearch/issues/111358 --- .../esql/core/type/InvalidMappedField.java | 8 +- .../xpack/esql/action/EsqlCapabilities.java | 7 +- .../xpack/esql/analysis/VerifierTests.java | 31 +++-- .../test/esql/51_many_indexes.yml | 126 ++++++++++++++++++ 4 files changed, 156 insertions(+), 16 deletions(-) create mode 100644 x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/51_many_indexes.yml diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java index 9b3d7950c2a01..8b15893f8a056 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java @@ -130,7 +130,13 @@ private static String makeErrorMessage(Map> typesToIndices) errorMessage.append("["); errorMessage.append(e.getKey()); errorMessage.append("] in "); - errorMessage.append(e.getValue()); + if (e.getValue().size() <= 3) { + 
errorMessage.append(e.getValue()); + } else { + errorMessage.append(e.getValue().stream().sorted().limit(3).collect(Collectors.toList())); + errorMessage.append(" and [" + (e.getValue().size() - 3) + "] other "); + errorMessage.append(e.getValue().size() == 4 ? "index" : "indices"); + } } return errorMessage.toString(); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 3abbb655dadd3..996c5ac2ea319 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -229,7 +229,12 @@ public enum Cap { /** * Consider the upper bound when computing the interval in BUCKET auto mode. */ - BUCKET_INCLUSIVE_UPPER_BOUND; + BUCKET_INCLUSIVE_UPPER_BOUND, + + /** + * Changed error messages for fields with conflicting types in different indices. 
+ */ + SHORT_ERROR_MESSAGES_FOR_UNSUPPORTED_FIELDS; private final boolean snapshotOnly; private final FeatureFlag featureFlag; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index 904308ef64d58..9b0c32b8ade2e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -64,9 +64,12 @@ public void testUnsupportedAndMultiTypedFields() { LinkedHashSet ipIndices = new LinkedHashSet<>(); ipIndices.add("test1"); ipIndices.add("test2"); + ipIndices.add("test3"); + ipIndices.add("test4"); + ipIndices.add("test5"); LinkedHashMap> typesToIndices = new LinkedHashMap<>(); typesToIndices.put("ip", ipIndices); - typesToIndices.put("keyword", Set.of("test3")); + typesToIndices.put("keyword", Set.of("test6")); EsField multiTypedField = new InvalidMappedField(multiTyped, typesToIndices); // Also add an unsupported/multityped field under the names `int` and `double` so we can use `LOOKUP int_number_names ...` and @@ -85,7 +88,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:22: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | dissect multi_typed \"%{foo}\"", analyzer) ); @@ -95,7 +98,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:19: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | grok multi_typed \"%{WORD:foo}\"", analyzer) ); @@ -115,7 +118,7 @@ public 
void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:23: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | eval x = multi_typed", analyzer) ); @@ -125,7 +128,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:32: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | eval x = to_lower(multi_typed)", analyzer) ); @@ -135,7 +138,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:32: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | stats count(1) by multi_typed", analyzer) ); if (EsqlCapabilities.Cap.INLINESTATS.isEnabled()) { @@ -145,7 +148,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:38: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | inlinestats count(1) by multi_typed", analyzer) ); } @@ -156,7 +159,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:27: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | stats values(multi_typed)", analyzer) ); if 
(EsqlCapabilities.Cap.INLINESTATS.isEnabled()) { @@ -166,7 +169,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:33: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | inlinestats values(multi_typed)", analyzer) ); } @@ -177,7 +180,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:27: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | stats values(multi_typed)", analyzer) ); @@ -200,7 +203,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:24: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | mv_expand multi_typed", analyzer) ); @@ -210,7 +213,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:21: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | rename multi_typed as x", analyzer) ); @@ -220,7 +223,7 @@ public void testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:19: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | sort multi_typed desc", analyzer) ); @@ -230,7 +233,7 @@ public void 
testUnsupportedAndMultiTypedFields() { ); assertEquals( "1:20: Cannot use field [multi_typed] due to ambiguities being mapped as [2] incompatible types:" - + " [ip] in [test1, test2], [keyword] in [test3]", + + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | where multi_typed is not null", analyzer) ); } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/51_many_indexes.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/51_many_indexes.yml new file mode 100644 index 0000000000000..eb589cb810cc3 --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/51_many_indexes.yml @@ -0,0 +1,126 @@ +setup: + - requires: + capabilities: + - method: POST + path: /_query + parameters: [method, path, parameters, capabilities] + capabilities: [short_error_messages_for_unsupported_fields] + reason: "We changed error messages for unsupported fields in v 8.16" + test_runner_features: [capabilities, allowed_warnings_regex] + + - do: + indices.create: + index: ambiguous_1 + body: + mappings: + properties: + "name": + type: keyword + + - do: + indices.create: + index: ambiguous_2 + body: + mappings: + properties: + "name": + type: keyword + + - do: + indices.create: + index: ambiguous_3 + body: + mappings: + properties: + "name": + type: keyword + + - do: + indices.create: + index: ambiguous_4 + body: + mappings: + properties: + "name": + type: integer + + - do: + indices.create: + index: ambiguous_5 + body: + mappings: + properties: + "name": + type: integer + + - do: + indices.create: + index: ambiguous_6 + body: + mappings: + properties: + "name": + type: integer + + - do: + indices.create: + index: ambiguous_7 + body: + mappings: + properties: + "name": + type: integer + + - do: + indices.create: + index: ambiguous_8 + body: + mappings: + properties: + "name": + type: ip + + - do: + indices.create: + index: ambiguous_9 + body: + mappings: + properties: + 
"name": + type: ip + + - do: + indices.create: + index: ambiguous_10 + body: + mappings: + properties: + "name": + type: ip + + - do: + indices.create: + index: ambiguous_11 + body: + mappings: + properties: + "name": + type: ip + + - do: + indices.create: + index: ambiguous_12 + body: + mappings: + properties: + "name": + type: ip + +--- +load many indices with ambiguities: + - do: + catch: '/Cannot use field \[name\] due to ambiguities being mapped as \[3\] incompatible types: \[integer\] in \[ambiguous_4, ambiguous_5, ambiguous_6\] and \[1\] other index, \[ip\] in \[ambiguous_10, ambiguous_11, ambiguous_12\] and \[2\] other indices, \[keyword\] in \[ambiguous_1, ambiguous_2, ambiguous_3\]/' + esql.query: + body: + query: 'FROM ambiguous* | SORT name' + From e6b830e3b3e3665dff061c4e6c92285efdb1df55 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 19 Aug 2024 17:49:48 +0100 Subject: [PATCH 08/20] Clean up dangling S3 multipart uploads (#111955) If Elasticsearch fails part-way through a multipart upload to S3 it will generally try and abort the upload, but it's possible that the abort attempt also fails. In this case the upload becomes _dangling_. Dangling uploads consume storage space, and therefore cost money, until they are eventually aborted. Earlier versions of Elasticsearch require users to check for dangling multipart uploads, and to manually abort any that they find. This commit introduces a cleanup process which aborts all dangling uploads on each snapshot delete instead. 
Closes #44971 Closes #101169 --- docs/changelog/111955.yaml | 7 ++ .../snapshot-restore/repository-s3.asciidoc | 36 ++---- .../s3/S3BlobStoreRepositoryTests.java | 113 ++++++++++++++++++ .../repositories/s3/S3BlobContainer.java | 95 +++++++++++++++ .../repositories/s3/S3Repository.java | 95 +++++++++++++++ .../main/java/fixture/s3/S3HttpHandler.java | 11 +- 6 files changed, 329 insertions(+), 28 deletions(-) create mode 100644 docs/changelog/111955.yaml diff --git a/docs/changelog/111955.yaml b/docs/changelog/111955.yaml new file mode 100644 index 0000000000000..ebc518203b7cc --- /dev/null +++ b/docs/changelog/111955.yaml @@ -0,0 +1,7 @@ +pr: 111955 +summary: Clean up dangling S3 multipart uploads +area: Snapshot/Restore +type: enhancement +issues: + - 101169 + - 44971 diff --git a/docs/reference/snapshot-restore/repository-s3.asciidoc b/docs/reference/snapshot-restore/repository-s3.asciidoc index d757a74110ca9..3a9c12caebad9 100644 --- a/docs/reference/snapshot-restore/repository-s3.asciidoc +++ b/docs/reference/snapshot-restore/repository-s3.asciidoc @@ -317,6 +317,15 @@ include::repository-shared-settings.asciidoc[] https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html[AWS DeleteObjects API]. +`max_multipart_upload_cleanup_size`:: + + (<>) Sets the maximum number of possibly-dangling multipart + uploads to clean up in each batch of snapshot deletions. Defaults to `1000` + which is the maximum number supported by the + https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html[AWS + ListMultipartUploads API]. If set to `0`, {es} will not attempt to clean up + dangling multipart uploads. + NOTE: The option of defining client settings in the repository settings as documented below is considered deprecated, and will be removed in a future version. @@ -492,33 +501,6 @@ by the `elasticsearch` user. 
By default, {es} runs as user `elasticsearch` using If the symlink exists, it will be used by default by all S3 repositories that don't have explicit `client` credentials. -==== Cleaning up multi-part uploads - -{es} uses S3's multi-part upload process to upload larger blobs to the -repository. The multi-part upload process works by dividing each blob into -smaller parts, uploading each part independently, and then completing the -upload in a separate step. This reduces the amount of data that {es} must -re-send if an upload fails: {es} only needs to re-send the part that failed -rather than starting from the beginning of the whole blob. The storage for each -part is charged independently starting from the time at which the part was -uploaded. - -If a multi-part upload cannot be completed then it must be aborted in order to -delete any parts that were successfully uploaded, preventing further storage -charges from accumulating. {es} will automatically abort a multi-part upload on -failure, but sometimes the abort request itself fails. For example, if the -repository becomes inaccessible or the instance on which {es} is running is -terminated abruptly then {es} cannot complete or abort any ongoing uploads. - -You must make sure that failed uploads are eventually aborted to avoid -unnecessary storage costs. You can use the -https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html[List -multipart uploads API] to list the ongoing uploads and look for any which are -unusually long-running, or you can -https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpu-abort-incomplete-mpu-lifecycle-config.html[configure -a bucket lifecycle policy] to automatically abort incomplete uploads once they -reach a certain age. 
- [[repository-s3-aws-vpc]] ==== AWS VPC bandwidth settings diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index 1132111826563..1ab370ad203fc 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -10,13 +10,20 @@ import fixture.s3.S3HttpHandler; import com.amazonaws.http.AmazonHttpClient; +import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; +import com.amazonaws.services.s3.model.ListMultipartUploadsRequest; +import com.amazonaws.services.s3.model.MultipartUpload; import com.sun.net.httpserver.Headers; import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.core.LogEvent; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; @@ -54,6 +61,7 @@ import org.elasticsearch.telemetry.TestTelemetryPlugin; import org.elasticsearch.test.BackgroundIndexer; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.junit.annotations.TestIssueLogging; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -70,6 +78,7 @@ import 
java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; @@ -81,6 +90,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasEntry; @@ -451,6 +461,106 @@ private Map getServerMetrics() { return Collections.emptyMap(); } + public void testMultipartUploadCleanup() { + final String repoName = randomRepositoryName(); + createRepository(repoName, repositorySettings(repoName), true); + + createIndex("test-idx-1"); + for (int i = 0; i < 100; i++) { + prepareIndex("test-idx-1").setId(Integer.toString(i)).setSource("foo", "bar" + i).get(); + } + client().admin().indices().prepareRefresh().get(); + + final String snapshotName = randomIdentifier(); + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) + .setWaitForCompletion(true) + .get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + + final var repository = asInstanceOf( + S3Repository.class, + internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class).repository(repoName) + ); + final var blobStore = asInstanceOf(S3BlobStore.class, asInstanceOf(BlobStoreWrapper.class, repository.blobStore()).delegate()); + + try (var clientRef = blobStore.clientReference()) { + final var danglingBlobName = randomIdentifier(); + final var initiateMultipartUploadRequest = 
new InitiateMultipartUploadRequest( + blobStore.bucket(), + blobStore.blobContainer(repository.basePath().add("test-multipart-upload")).path().buildAsString() + danglingBlobName + ); + initiateMultipartUploadRequest.putCustomQueryParameter( + S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, + OperationPurpose.SNAPSHOT_DATA.getKey() + ); + final var multipartUploadResult = clientRef.client().initiateMultipartUpload(initiateMultipartUploadRequest); + + final var listMultipartUploadsRequest = new ListMultipartUploadsRequest(blobStore.bucket()).withPrefix( + repository.basePath().buildAsString() + ); + listMultipartUploadsRequest.putCustomQueryParameter( + S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, + OperationPurpose.SNAPSHOT_DATA.getKey() + ); + assertEquals( + List.of(multipartUploadResult.getUploadId()), + clientRef.client() + .listMultipartUploads(listMultipartUploadsRequest) + .getMultipartUploads() + .stream() + .map(MultipartUpload::getUploadId) + .toList() + ); + + final var seenCleanupLogLatch = new CountDownLatch(1); + MockLog.assertThatLogger(() -> { + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName)); + safeAwait(seenCleanupLogLatch); + }, + S3BlobContainer.class, + new MockLog.SeenEventExpectation( + "found-dangling", + S3BlobContainer.class.getCanonicalName(), + Level.INFO, + "found [1] possibly-dangling multipart uploads; will clean them up after finalizing the current snapshot deletions" + ), + new MockLog.SeenEventExpectation( + "cleaned-dangling", + S3BlobContainer.class.getCanonicalName(), + Level.INFO, + Strings.format( + "cleaned up dangling multipart upload [%s] of blob [%s]*test-multipart-upload/%s]", + multipartUploadResult.getUploadId(), + repoName, + danglingBlobName + ) + ) { + @Override + public void match(LogEvent event) { + super.match(event); + if (Regex.simpleMatch(message, event.getMessage().getFormattedMessage())) { + seenCleanupLogLatch.countDown(); + } + } + } + ); + + assertThat( + 
clientRef.client() + .listMultipartUploads(listMultipartUploadsRequest) + .getMultipartUploads() + .stream() + .map(MultipartUpload::getUploadId) + .toList(), + empty() + ); + } + } + /** * S3RepositoryPlugin that allows to disable chunked encoding and to set a low threshold between single upload and multipart upload. */ @@ -592,6 +702,9 @@ public void maybeTrack(final String rawRequest, Headers requestHeaders) { trackRequest("ListObjects"); metricsCount.computeIfAbsent(new S3BlobStore.StatsKey(S3BlobStore.Operation.LIST_OBJECTS, purpose), k -> new AtomicLong()) .incrementAndGet(); + } else if (Regex.simpleMatch("GET /*/?uploads&*", request)) { + // TODO track ListMultipartUploads requests + logger.info("--> ListMultipartUploads not tracked [{}] with parsed purpose [{}]", request, purpose.getKey()); } else if (Regex.simpleMatch("GET /*/*", request)) { trackRequest("GetObject"); metricsCount.computeIfAbsent(new S3BlobStore.StatsKey(S3BlobStore.Operation.GET_OBJECT, purpose), k -> new AtomicLong()) diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java index 3e2249bf82bb6..cf3e73df2aee2 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java @@ -28,13 +28,17 @@ import com.amazonaws.services.s3.model.UploadPartResult; import com.amazonaws.util.ValidationUtils; +import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.RefCountingListener; +import org.elasticsearch.action.support.RefCountingRunnable; import 
org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.action.support.ThreadedActionListener; +import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobContainer; @@ -54,6 +58,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; +import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.ChunkedBlobOutputStream; import org.elasticsearch.repositories.s3.S3BlobStore.Operation; import org.elasticsearch.threadpool.ThreadPool; @@ -912,4 +917,94 @@ public void getRegister(OperationPurpose purpose, String key, ActionListener getMultipartUploadCleanupListener(int maxUploads, RefCountingRunnable refs) { + try (var clientReference = blobStore.clientReference()) { + final var bucket = blobStore.bucket(); + final var request = new ListMultipartUploadsRequest(bucket).withPrefix(keyPath).withMaxUploads(maxUploads); + request.putCustomQueryParameter(S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, OperationPurpose.SNAPSHOT_DATA.getKey()); + final var multipartUploadListing = SocketAccess.doPrivileged(() -> clientReference.client().listMultipartUploads(request)); + final var multipartUploads = multipartUploadListing.getMultipartUploads(); + if (multipartUploads.isEmpty()) { + logger.debug("found no multipart uploads to clean up"); + return ActionListener.noop(); + } else { + // the uploads are only _possibly_ dangling because it's also possible we're no longer the master and the new master has + started some more shard snapshots + if (multipartUploadListing.isTruncated()) { + logger.info("""
 + found at least [{}] possibly-dangling multipart uploads; will clean up the first [{}] after finalizing \
 + the current snapshot deletions, and will check for further possibly-dangling multipart uploads in future \
 + 
snapshot deletions""", multipartUploads.size(), multipartUploads.size()); + } else { + logger.info(""" + found [{}] possibly-dangling multipart uploads; \ + will clean them up after finalizing the current snapshot deletions""", multipartUploads.size()); + } + return newMultipartUploadCleanupListener( + refs, + multipartUploads.stream().map(u -> new AbortMultipartUploadRequest(bucket, u.getKey(), u.getUploadId())).toList() + ); + } + } catch (Exception e) { + // Cleanup is a best-effort thing, we can't do anything better than log and carry on here. + logger.warn("failure while checking for possibly-dangling multipart uploads", e); + return ActionListener.noop(); + } + } + + private ActionListener newMultipartUploadCleanupListener( + RefCountingRunnable refs, + List abortMultipartUploadRequests + ) { + return new ThreadedActionListener<>(blobStore.getSnapshotExecutor(), ActionListener.releaseAfter(new ActionListener<>() { + @Override + public void onResponse(Void unused) { + try (var clientReference = blobStore.clientReference()) { + for (final var abortMultipartUploadRequest : abortMultipartUploadRequests) { + abortMultipartUploadRequest.putCustomQueryParameter( + S3BlobStore.CUSTOM_QUERY_PARAMETER_PURPOSE, + OperationPurpose.SNAPSHOT_DATA.getKey() + ); + try { + SocketAccess.doPrivilegedVoid(() -> clientReference.client().abortMultipartUpload(abortMultipartUploadRequest)); + logger.info( + "cleaned up dangling multipart upload [{}] of blob [{}][{}][{}]", + abortMultipartUploadRequest.getUploadId(), + blobStore.getRepositoryMetadata().name(), + abortMultipartUploadRequest.getBucketName(), + abortMultipartUploadRequest.getKey() + ); + } catch (Exception e) { + // Cleanup is a best-effort thing, we can't do anything better than log and carry on here. Note that any failure + // is surprising, even a 404 means that something else aborted/completed the upload at a point where there + // should be no other processes interacting with the repository. 
+ logger.warn( + Strings.format( + "failed to clean up multipart upload [%s] of blob [%s][%s][%s]", + abortMultipartUploadRequest.getUploadId(), + blobStore.getRepositoryMetadata().name(), + abortMultipartUploadRequest.getBucketName(), + abortMultipartUploadRequest.getKey() + ), + e + ); + } + } + } + } + + @Override + public void onFailure(Exception e) { + logger.log( + MasterService.isPublishFailureException(e) + || (e instanceof RepositoryException repositoryException + && repositoryException.getCause() instanceof Exception cause + && MasterService.isPublishFailureException(cause)) ? Level.DEBUG : Level.WARN, + "failed to start cleanup of dangling multipart uploads", + e + ); + } + }, refs.acquire())); + } } diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 72b48c5903629..a6edb0dec4122 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ReferenceDocs; @@ -28,6 +29,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.monitor.jvm.JvmInfo; @@ -36,14 +38,17 @@ import org.elasticsearch.repositories.RepositoryException; import 
org.elasticsearch.repositories.blobstore.MeteredBlobStoreRepository; import org.elasticsearch.snapshots.SnapshotDeleteListener; +import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; +import java.util.Collection; import java.util.Map; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; @@ -183,6 +188,16 @@ class S3Repository extends MeteredBlobStoreRepository { S3BlobStore.MAX_BULK_DELETES ); + /** + * Maximum number of uploads to request for cleanup when doing a snapshot delete. + */ + static final Setting MAX_MULTIPART_UPLOAD_CLEANUP_SIZE = Setting.intSetting( + "max_multipart_upload_cleanup_size", + 1000, + 0, + Setting.Property.Dynamic + ); + private final S3Service service; private final String bucket; @@ -459,4 +474,84 @@ public String getAnalysisFailureExtraDetail() { ReferenceDocs.S3_COMPATIBLE_REPOSITORIES ); } + + // only one multipart cleanup process running at once + private final AtomicBoolean multipartCleanupInProgress = new AtomicBoolean(); + + @Override + public void deleteSnapshots( + Collection snapshotIds, + long repositoryDataGeneration, + IndexVersion minimumNodeVersion, + SnapshotDeleteListener snapshotDeleteListener + ) { + getMultipartUploadCleanupListener( + isReadOnly() ? 
0 : MAX_MULTIPART_UPLOAD_CLEANUP_SIZE.get(getMetadata().settings()), + new ActionListener<>() { + @Override + public void onResponse(ActionListener multipartUploadCleanupListener) { + S3Repository.super.deleteSnapshots( + snapshotIds, + repositoryDataGeneration, + minimumNodeVersion, + new SnapshotDeleteListener() { + @Override + public void onDone() { + snapshotDeleteListener.onDone(); + } + + @Override + public void onRepositoryDataWritten(RepositoryData repositoryData) { + multipartUploadCleanupListener.onResponse(null); + snapshotDeleteListener.onRepositoryDataWritten(repositoryData); + } + + @Override + public void onFailure(Exception e) { + multipartUploadCleanupListener.onFailure(e); + snapshotDeleteListener.onFailure(e); + } + } + ); + } + + @Override + public void onFailure(Exception e) { + logger.warn("failed to get multipart uploads for cleanup during snapshot delete", e); + assert false : e; // getMultipartUploadCleanupListener doesn't throw and snapshotExecutor doesn't reject anything + snapshotDeleteListener.onFailure(e); + } + } + ); + } + + /** + * Capture the current list of multipart uploads, and (asynchronously) return a listener which, if completed successfully, aborts those + * uploads. Called at the start of a snapshot delete operation, at which point there should be no ongoing uploads (except in the case of + * a master failover). We protect against the master failover case by waiting until the delete operation successfully updates the root + * index-N blob before aborting any uploads. 
+ */ + void getMultipartUploadCleanupListener(int maxUploads, ActionListener> listener) { + if (maxUploads == 0) { + listener.onResponse(ActionListener.noop()); + return; + } + + if (multipartCleanupInProgress.compareAndSet(false, true) == false) { + logger.info("multipart upload cleanup already in progress"); + listener.onResponse(ActionListener.noop()); + return; + } + + try (var refs = new RefCountingRunnable(() -> multipartCleanupInProgress.set(false))) { + snapshotExecutor.execute( + ActionRunnable.supply( + ActionListener.releaseAfter(listener, refs.acquire()), + () -> blobContainer() instanceof S3BlobContainer s3BlobContainer + ? s3BlobContainer.getMultipartUploadCleanupListener(maxUploads, refs) + : ActionListener.noop() + ) + ); + } + } } diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java index 7f363fe0b87c3..447e225005b58 100644 --- a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java +++ b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java @@ -61,6 +61,7 @@ public class S3HttpHandler implements HttpHandler { private final String bucket; private final String path; + private final String basePrefix; private final ConcurrentMap blobs = new ConcurrentHashMap<>(); private final ConcurrentMap uploads = new ConcurrentHashMap<>(); @@ -71,6 +72,7 @@ public S3HttpHandler(final String bucket) { public S3HttpHandler(final String bucket, @Nullable final String basePath) { this.bucket = Objects.requireNonNull(bucket); + this.basePrefix = Objects.requireNonNullElse(basePath, ""); this.path = bucket + (basePath != null && basePath.isEmpty() == false ? 
"/" + basePath : ""); } @@ -96,7 +98,9 @@ public void handle(final HttpExchange exchange) throws IOException { } else { exchange.sendResponseHeaders(RestStatus.OK.getStatus(), -1); } - } else if (Regex.simpleMatch("GET /" + bucket + "/?uploads&prefix=*", request)) { + } else if (isListMultipartUploadsRequest(request)) { + assert request.contains("prefix=" + basePrefix) : basePrefix + " vs " + request; + final Map params = new HashMap<>(); RestUtils.decodeQueryString(request, request.indexOf('?') + 1, params); final var prefix = params.get("prefix"); @@ -329,6 +333,11 @@ public void handle(final HttpExchange exchange) throws IOException { } } + private boolean isListMultipartUploadsRequest(String request) { + return Regex.simpleMatch("GET /" + bucket + "/?uploads&prefix=*", request) + || Regex.simpleMatch("GET /" + bucket + "/?uploads&max-uploads=*&prefix=*", request); + } + public Map blobs() { return blobs; } From 19dc8841d4053fd449a47c4e9bd26d35767d649e Mon Sep 17 00:00:00 2001 From: Oleksandr Kolomiiets Date: Mon, 19 Aug 2024 09:52:41 -0700 Subject: [PATCH 09/20] Fix synthetic source for empty nested objects (#111943) --- docs/changelog/111943.yaml | 6 ++ ...ogsIndexModeRandomDataChallengeRestIT.java | 7 +- .../index/mapper/NestedObjectMapper.java | 5 +- .../index/mapper/ObjectMapper.java | 14 +-- .../index/mapper/NestedObjectMapperTests.java | 91 +++++++++++++++++++ 5 files changed, 106 insertions(+), 17 deletions(-) create mode 100644 docs/changelog/111943.yaml diff --git a/docs/changelog/111943.yaml b/docs/changelog/111943.yaml new file mode 100644 index 0000000000000..6b9f03ccee31c --- /dev/null +++ b/docs/changelog/111943.yaml @@ -0,0 +1,6 @@ +pr: 111943 +summary: Fix synthetic source for empty nested objects +area: Mapping +type: bug +issues: + - 111811 diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java 
b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java index 0b41d62f6fe2c..8f23f86267261 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java @@ -42,10 +42,9 @@ public StandardVersusLogsIndexModeRandomDataChallengeRestIT() { this.subobjectsDisabled = randomBoolean(); var specificationBuilder = DataGeneratorSpecification.builder(); - // TODO enable nested fields when subobjects are enabled - // It currently hits a bug with empty nested objects - // Nested fields don't work with subobjects: false. - specificationBuilder = specificationBuilder.withNestedFieldsLimit(0); + if (subobjectsDisabled) { + specificationBuilder = specificationBuilder.withNestedFieldsLimit(0); + } this.dataGenerator = new DataGenerator(specificationBuilder.withDataSourceHandlers(List.of(new DataSourceHandler() { @Override public DataSourceResponse.FieldTypeGenerator handle(DataSourceRequest.FieldTypeGenerator request) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java index 76212f9899f5c..23bdd0f559206 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java @@ -463,17 +463,14 @@ public boolean hasValue() { public void write(XContentBuilder b) throws IOException { assert (children != null && children.size() > 0); if (children.size() == 1) { - b.startObject(leafName()); + b.field(leafName()); leafStoredFieldLoader.advanceTo(children.get(0)); leafSourceLoader.write(leafStoredFieldLoader, children.get(0), b); - b.endObject(); } else { 
b.startArray(leafName()); for (int childId : children) { - b.startObject(); leafStoredFieldLoader.advanceTo(childId); leafSourceLoader.write(leafStoredFieldLoader, childId, b); - b.endObject(); } b.endArray(); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index a3d5999a3dcd2..843fc3b15a6df 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -843,12 +843,10 @@ public void write(XContentBuilder b) throws IOException { return; } - if (isFragment == false) { - if (isRoot()) { - b.startObject(); - } else { - b.startObject(leafName()); - } + if (isRoot() || isFragment) { + b.startObject(); + } else { + b.startObject(leafName()); } if (ignoredValues != null && ignoredValues.isEmpty() == false) { @@ -875,9 +873,7 @@ public void write(XContentBuilder b) throws IOException { } } hasValue = false; - if (isFragment == false) { - b.endObject(); - } + b.endObject(); } @Override diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java index 306887099849b..4fba22101df03 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java @@ -1737,6 +1737,97 @@ public void testSyntheticNestedWithIncludeInRoot() throws IOException { {"path":{"bar":"B","foo":"A"}}""", syntheticSource); } + public void testSyntheticNestedWithEmptyObject() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path").field("type", "nested"); + { + b.startObject("properties"); + { + b.startObject("foo").field("type", "keyword").endObject(); + } + b.endObject(); + } + b.endObject(); + 
})).documentMapper(); + var syntheticSource = syntheticSource(documentMapper, b -> { b.startObject("path").nullField("foo").endObject(); }); + assertEquals(""" + {"path":{}}""", syntheticSource); + } + + public void testSyntheticNestedWithEmptySubObject() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path").field("type", "nested"); + { + b.startObject("properties"); + { + b.startObject("to").startObject("properties"); + { + b.startObject("foo").field("type", "keyword").endObject(); + } + b.endObject().endObject(); + } + b.endObject(); + } + b.endObject(); + })).documentMapper(); + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startObject("path"); + { + b.startObject("to").nullField("foo").endObject(); + } + b.endObject(); + }); + assertEquals(""" + {"path":{}}""", syntheticSource); + } + + public void testSyntheticNestedWithArrayContainingEmptyObject() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path").field("type", "nested"); + { + b.startObject("properties"); + { + b.startObject("foo").field("type", "keyword").endObject(); + } + b.endObject(); + } + b.endObject(); + })).documentMapper(); + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startArray("path"); + { + b.startObject().field("foo", "A").endObject(); + b.startObject().nullField("foo").endObject(); + } + b.endArray(); + }); + assertEquals(""" + {"path":[{"foo":"A"},{}]}""", syntheticSource); + } + + public void testSyntheticNestedWithArrayContainingOnlyEmptyObject() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path").field("type", "nested"); + { + b.startObject("properties"); + { + b.startObject("foo").field("type", "keyword").endObject(); + } + b.endObject(); + } + b.endObject(); + })).documentMapper(); + var syntheticSource = 
syntheticSource(documentMapper, b -> { + b.startArray("path"); + { + b.startObject().nullField("foo").endObject(); + } + b.endArray(); + }); + assertEquals(""" + {"path":{}}""", syntheticSource); + } + private NestedObjectMapper createNestedObjectMapperWithAllParametersSet(CheckedConsumer propertiesBuilder) throws IOException { DocumentMapper mapper = createDocumentMapper(mapping(b -> { From 6f33812fade9c69745fe2a3d66b063028c79f1d4 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 19 Aug 2024 12:02:04 -0700 Subject: [PATCH 10/20] Avoid losing error message in failure collector (#111983) The node-disconnected exception might not include the root cause. In this case, the failure collector incorrectly unwraps the exception and wraps it in a new Elasticsearch exception, losing the message. We should instead use the original exception to preserve the reason. Closes #111894 --- docs/changelog/111983.yaml | 6 ++++++ .../compute/operator/FailureCollector.java | 17 +++++++++++++---- .../operator/FailureCollectorTests.java | 19 +++++++++++++++++++ 3 files changed, 38 insertions(+), 4 deletions(-) create mode 100644 docs/changelog/111983.yaml diff --git a/docs/changelog/111983.yaml b/docs/changelog/111983.yaml new file mode 100644 index 0000000000000..d5043d0b44155 --- /dev/null +++ b/docs/changelog/111983.yaml @@ -0,0 +1,6 @@ +pr: 111983 +summary: Avoid losing error message in failure collector +area: ES|QL +type: bug +issues: + - 111894 diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FailureCollector.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FailureCollector.java index 99edab038af31..943ba4dc1f4fa 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FailureCollector.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FailureCollector.java @@ -46,10 +46,19 @@ public FailureCollector(int maxExceptions) { 
this.maxExceptions = maxExceptions; } - public void unwrapAndCollect(Exception originEx) { - final Exception e = originEx instanceof TransportException - ? (originEx.getCause() instanceof Exception cause ? cause : new ElasticsearchException(originEx.getCause())) - : originEx; + private static Exception unwrapTransportException(TransportException te) { + final Throwable cause = te.getCause(); + if (cause == null) { + return te; + } else if (cause instanceof Exception ex) { + return ex; + } else { + return new ElasticsearchException(cause); + } + } + + public void unwrapAndCollect(Exception e) { + e = e instanceof TransportException te ? unwrapTransportException(te) : e; if (ExceptionsHelper.unwrap(e, TaskCancelledException.class) != null) { if (cancelledExceptionsCount.incrementAndGet() <= maxExceptions) { cancelledExceptions.add(e); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FailureCollectorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FailureCollectorTests.java index d5fa0a1eaecc9..637cbe8892b3e 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FailureCollectorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FailureCollectorTests.java @@ -7,12 +7,15 @@ package org.elasticsearch.compute.operator; +import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.NodeDisconnectedException; import org.elasticsearch.transport.RemoteTransportException; +import org.elasticsearch.transport.TransportException; import org.hamcrest.Matchers; import java.io.IOException; @@ -25,6 +28,9 @@ import 
java.util.stream.IntStream; import java.util.stream.Stream; +import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThan; public class FailureCollectorTests extends ESTestCase { @@ -87,4 +93,17 @@ public void testEmpty() { assertFalse(collector.hasFailure()); assertNull(collector.getFailure()); } + + public void testTransportExceptions() { + FailureCollector collector = new FailureCollector(5); + collector.unwrapAndCollect(new NodeDisconnectedException(DiscoveryNodeUtils.builder("node-1").build(), "/field_caps")); + collector.unwrapAndCollect(new TransportException(new CircuitBreakingException("too large", CircuitBreaker.Durability.TRANSIENT))); + Exception failure = collector.getFailure(); + assertNotNull(failure); + assertThat(failure, instanceOf(NodeDisconnectedException.class)); + assertThat(failure.getMessage(), equalTo("[][0.0.0.0:1][/field_caps] disconnected")); + Throwable[] suppressed = failure.getSuppressed(); + assertThat(suppressed, arrayWithSize(1)); + assertThat(suppressed[0], instanceOf(CircuitBreakingException.class)); + } } From dc24003540e02152fe64893d7c38af3f3dc31996 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 19 Aug 2024 17:29:01 -0400 Subject: [PATCH 11/20] ESQL: Profile more timing information (#111855) This profiles additional timing information for each individual driver. To the results from `profile` it adds the start and stop time for each driver. That was already in the task status. To the profile and task status it also adds the number of times the driver slept and some more detailed history about a few of those times. Explanation time! The compute engine splits work into some number of `Drivers` per node. Each `Driver` is a single threaded entity - it runs on a thread for a while then does one of three things: 1. Finishes 2. Goes async because one of its `Operator`s has gone async 3.
Yields the thread pool because it has run for too long This PR measures the second two. At this point only three operators can go async: * ENRICH * Reading from an empty exchange * Writing to a full exchange We're quite interested in these sleeps at the moment because we think they may be slowing things down. Here's what it looks like when a driver goes async because it wants to read from an empty exchange: ``` ... the rest of the profile ... "sleeps" : { "counts" : { "exchange empty" : 2 }, "first" : [ { "reason" : "exchange empty", "sleep" : "2024-08-13T19:45:57.943Z", "sleep_millis" : 1723578357943, "wake" : "2024-08-13T19:45:58.159Z", "wake_millis" : 1723578358159 }, { "reason" : "exchange empty", "sleep" : "2024-08-13T19:45:58.164Z", "sleep_millis" : 1723578358164, "wake" : "2024-08-13T19:45:58.165Z", "wake_millis" : 1723578358165 } ], "last": [same as above] ``` Every time the driver goes async we count it in the `counts` map - grouped by the reason the driver slept. We also record the sleep and wake times for the first and last ten times the driver sleeps. In this case it only slept twice, so the `first` and `last` ten times is the same array. This should give us a good sense about why drivers sleep while using a limited amount of memory per driver.
--- docs/changelog/111855.yaml | 5 + .../description/to_datetime.asciidoc | 2 + .../kibana/definition/to_datetime.json | 1 + .../esql/functions/kibana/docs/to_datetime.md | 1 + .../org/elasticsearch/TransportVersions.java | 1 + .../compute/operator/AsyncOperator.java | 4 +- .../compute/operator/Driver.java | 108 ++++++-- .../compute/operator/DriverProfile.java | 81 +++++- .../compute/operator/DriverSleeps.java | 180 +++++++++++++ .../compute/operator/DriverStatus.java | 38 ++- .../compute/operator/IsBlockedResult.java | 31 +++ .../compute/operator/Operator.java | 4 +- .../operator/exchange/ExchangeBuffer.java | 9 +- .../operator/exchange/ExchangeSink.java | 4 +- .../exchange/ExchangeSinkHandler.java | 3 +- .../exchange/ExchangeSinkOperator.java | 6 +- .../operator/exchange/ExchangeSource.java | 4 +- .../exchange/ExchangeSourceHandler.java | 11 +- .../exchange/ExchangeSourceOperator.java | 10 +- .../compute/operator/AsyncOperatorTests.java | 49 ++-- .../compute/operator/DriverProfileTests.java | 60 ++++- .../compute/operator/DriverSleepsTests.java | 240 ++++++++++++++++++ .../compute/operator/DriverStatusTests.java | 42 ++- .../exchange/ExchangeServiceTests.java | 28 +- .../xpack/esql/qa/single_node/RestEsqlIT.java | 126 ++++++++- .../action/EsqlQueryResponseProfileTests.java | 6 +- .../esql/action/EsqlQueryResponseTests.java | 15 +- .../esql/plugin/ComputeListenerTests.java | 13 +- 28 files changed, 954 insertions(+), 128 deletions(-) create mode 100644 docs/changelog/111855.yaml create mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverSleeps.java create mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/IsBlockedResult.java create mode 100644 x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverSleepsTests.java diff --git a/docs/changelog/111855.yaml b/docs/changelog/111855.yaml new file mode 100644 index 0000000000000..3f15e9c20135a --- /dev/null +++ 
b/docs/changelog/111855.yaml @@ -0,0 +1,5 @@ +pr: 111855 +summary: "ESQL: Profile more timing information" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/reference/esql/functions/description/to_datetime.asciidoc b/docs/reference/esql/functions/description/to_datetime.asciidoc index b37bd6b22ac2f..ee6866da9ee34 100644 --- a/docs/reference/esql/functions/description/to_datetime.asciidoc +++ b/docs/reference/esql/functions/description/to_datetime.asciidoc @@ -3,3 +3,5 @@ *Description* Converts an input value to a date value. A string will only be successfully converted if it's respecting the format `yyyy-MM-dd'T'HH:mm:ss.SSS'Z'`. To convert dates in other formats, use <>. + +NOTE: Note that when converting from nanosecond resolution to millisecond resolution with this function, the nanosecond date is truncated, not rounded. diff --git a/docs/reference/esql/functions/kibana/definition/to_datetime.json b/docs/reference/esql/functions/kibana/definition/to_datetime.json index 10fcf8b22e8b0..778d151c40151 100644 --- a/docs/reference/esql/functions/kibana/definition/to_datetime.json +++ b/docs/reference/esql/functions/kibana/definition/to_datetime.json @@ -3,6 +3,7 @@ "type" : "eval", "name" : "to_datetime", "description" : "Converts an input value to a date value.\nA string will only be successfully converted if it's respecting the format `yyyy-MM-dd'T'HH:mm:ss.SSS'Z'`.\nTo convert dates in other formats, use <>.", + "note" : "Note that when converting from nanosecond resolution to millisecond resolution with this function, the nanosecond date is truncated, not rounded.", "signatures" : [ { "params" : [ diff --git a/docs/reference/esql/functions/kibana/docs/to_datetime.md b/docs/reference/esql/functions/kibana/docs/to_datetime.md index 5e8f9c72adc2c..613381615421a 100644 --- a/docs/reference/esql/functions/kibana/docs/to_datetime.md +++ b/docs/reference/esql/functions/kibana/docs/to_datetime.md @@ -11,3 +11,4 @@ To convert dates in other formats, use <>.
ROW string = ["1953-09-02T00:00:00.000Z", "1964-06-02T00:00:00.000Z", "1964-06-02 00:00:00"] | EVAL datetime = TO_DATETIME(string) ``` +Note: Note that when converting from nanosecond resolution to millisecond resolution with this function, the nanosecond date is truncated, not rounded. diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 1009d9e2ae7d1..3bece535aab0f 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -192,6 +192,7 @@ static TransportVersion def(int id) { public static final TransportVersion ZDT_NANOS_SUPPORT = def(8_722_00_0); public static final TransportVersion REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES = def(8_723_00_0); public static final TransportVersion RANDOM_RERANKER_RETRIEVER = def(8_724_00_0); + public static final TransportVersion ESQL_PROFILE_SLEEPS = def(8_725_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java index 79359737b1b35..92213eca7b477 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AsyncOperator.java @@ -199,7 +199,7 @@ public Page getOutput() { } @Override - public SubscribableListener isBlocked() { + public IsBlockedResult isBlocked() { // TODO: Add an exchange service between async operation instead?
if (finished) { return Operator.NOT_BLOCKED; @@ -216,7 +216,7 @@ public SubscribableListener isBlocked() { if (blockedFuture == null) { blockedFuture = new SubscribableListener<>(); } - return blockedFuture; + return new IsBlockedResult(blockedFuture, getClass().getSimpleName()); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Driver.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Driver.java index 785db826aadd6..acbf8a17b31fd 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Driver.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Driver.java @@ -127,7 +127,17 @@ public Driver( this.statusNanos = statusInterval.nanos(); this.releasable = releasable; this.status = new AtomicReference<>( - new DriverStatus(sessionId, startTime, System.currentTimeMillis(), 0, 0, DriverStatus.Status.QUEUED, List.of(), List.of()) + new DriverStatus( + sessionId, + startTime, + System.currentTimeMillis(), + 0, + 0, + DriverStatus.Status.QUEUED, + List.of(), + List.of(), + DriverSleeps.empty() + ) ); } @@ -170,35 +180,36 @@ public DriverContext driverContext() { * thread to do other work instead of blocking or busy-spinning on the blocked operator. 
*/ SubscribableListener run(TimeValue maxTime, int maxIterations, LongSupplier nowSupplier) { + updateStatus(0, 0, DriverStatus.Status.RUNNING, "driver running"); long maxTimeNanos = maxTime.nanos(); long startTime = nowSupplier.getAsLong(); long nextStatus = startTime + statusNanos; int iter = 0; while (true) { - SubscribableListener fut = runSingleLoopIteration(); + IsBlockedResult isBlocked = runSingleLoopIteration(); iter++; - if (fut.isDone() == false) { - updateStatus(nowSupplier.getAsLong() - startTime, iter, DriverStatus.Status.ASYNC); - return fut; + if (isBlocked.listener().isDone() == false) { + updateStatus(nowSupplier.getAsLong() - startTime, iter, DriverStatus.Status.ASYNC, isBlocked.reason()); + return isBlocked.listener(); } if (isFinished()) { finishNanos = nowSupplier.getAsLong(); - updateStatus(finishNanos - startTime, iter, DriverStatus.Status.DONE); + updateStatus(finishNanos - startTime, iter, DriverStatus.Status.DONE, "driver done"); driverContext.finish(); Releasables.close(releasable, driverContext.getSnapshot()); - return Operator.NOT_BLOCKED; + return Operator.NOT_BLOCKED.listener(); } long now = nowSupplier.getAsLong(); if (iter >= maxIterations) { - updateStatus(now - startTime, iter, DriverStatus.Status.WAITING); - return Operator.NOT_BLOCKED; + updateStatus(now - startTime, iter, DriverStatus.Status.WAITING, "driver iterations"); + return Operator.NOT_BLOCKED.listener(); } if (now - startTime >= maxTimeNanos) { - updateStatus(now - startTime, iter, DriverStatus.Status.WAITING); - return Operator.NOT_BLOCKED; + updateStatus(now - startTime, iter, DriverStatus.Status.WAITING, "driver time"); + return Operator.NOT_BLOCKED.listener(); } if (now > nextStatus) { - updateStatus(now - startTime, iter, DriverStatus.Status.RUNNING); + updateStatus(now - startTime, iter, DriverStatus.Status.RUNNING, "driver running"); nextStatus = now + statusNanos; } } @@ -230,7 +241,7 @@ public void abort(Exception reason, ActionListener listener) { } } - 
private SubscribableListener runSingleLoopIteration() { + private IsBlockedResult runSingleLoopIteration() { ensureNotCancelled(); boolean movedPage = false; @@ -239,7 +250,7 @@ private SubscribableListener runSingleLoopIteration() { Operator nextOp = activeOperators.get(i + 1); // skip blocked operator - if (op.isBlocked().isDone() == false) { + if (op.isBlocked().listener().isDone() == false) { continue; } @@ -290,7 +301,10 @@ private SubscribableListener runSingleLoopIteration() { if (movedPage == false) { return oneOf( - activeOperators.stream().map(Operator::isBlocked).filter(laf -> laf.isDone() == false).collect(Collectors.toList()) + activeOperators.stream() + .map(Operator::isBlocked) + .filter(laf -> laf.listener().isDone() == false) + .collect(Collectors.toList()) ); } return Operator.NOT_BLOCKED; @@ -327,7 +341,7 @@ public static void start( ) { driver.completionListener.addListener(listener); if (driver.started.compareAndSet(false, true)) { - driver.updateStatus(0, 0, DriverStatus.Status.STARTING); + driver.updateStatus(0, 0, DriverStatus.Status.STARTING, "driver starting"); schedule(DEFAULT_TIME_BEFORE_YIELDING, maxIterations, threadContext, executor, driver, driver.completionListener); } } @@ -394,18 +408,23 @@ void onComplete(ActionListener listener) { }); } - private static SubscribableListener oneOf(List> futures) { - if (futures.isEmpty()) { + private static IsBlockedResult oneOf(List results) { + if (results.isEmpty()) { return Operator.NOT_BLOCKED; } - if (futures.size() == 1) { - return futures.get(0); + if (results.size() == 1) { + return results.get(0); } SubscribableListener oneOf = new SubscribableListener<>(); - for (SubscribableListener fut : futures) { - fut.addListener(oneOf); + StringBuilder reason = new StringBuilder(); + for (IsBlockedResult r : results) { + r.listener().addListener(oneOf); + if (reason.isEmpty() == false) { + reason.append(" OR "); + } + reason.append(r.reason()); } - return oneOf; + return new 
IsBlockedResult(oneOf, reason.toString()); } @Override @@ -440,7 +459,15 @@ public DriverProfile profile() { if (status.status() != DriverStatus.Status.DONE) { throw new IllegalStateException("can only get profile from finished driver"); } - return new DriverProfile(finishNanos - startNanos, status.cpuNanos(), status.iterations(), status.completedOperators()); + return new DriverProfile( + status.started(), + status.lastUpdated(), + finishNanos - startNanos, + status.cpuNanos(), + status.iterations(), + status.completedOperators(), + status.sleeps() + ); } /** @@ -449,17 +476,44 @@ public DriverProfile profile() { * @param extraIterations how many iterations to add to the previous status * @param status the status of the overall driver request */ - private void updateStatus(long extraCpuNanos, int extraIterations, DriverStatus.Status status) { + private void updateStatus(long extraCpuNanos, int extraIterations, DriverStatus.Status status, String reason) { this.status.getAndUpdate(prev -> { + long now = System.currentTimeMillis(); + DriverSleeps sleeps = prev.sleeps(); + + // Rebuild the sleeps or bail entirely based on the updated status. + // Sorry for the complexity here. If anyone has a nice way to refactor this, be my guest. + switch (status) { + case ASYNC, WAITING -> sleeps = sleeps.sleep(reason, now); + case RUNNING -> { + switch (prev.status()) { + case ASYNC, WAITING -> sleeps = sleeps.wake(now); + case STARTING -> { + if (extraIterations == 0) { + /* + * 0 extraIterations means we haven't started the loop - we're just + * signaling that we've woken up. We don't need to signal that when + * the state is already STARTING because we don't have anything + * interesting to report. And some tests rely on the status staying + * in the STARTING state until the first status report. 
+ */ + return prev; + } + } + } + } + } + return new DriverStatus( sessionId, startTime, - System.currentTimeMillis(), + now, prev.cpuNanos() + extraCpuNanos, prev.iterations() + extraIterations, status, statusOfCompletedOperators, - activeOperators.stream().map(op -> new DriverStatus.OperatorStatus(op.toString(), op.status())).toList() + activeOperators.stream().map(op -> new DriverStatus.OperatorStatus(op.toString(), op.status())).toList(), + sleeps ); }); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java index 414fbbbca8294..e7b16072f4b66 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java @@ -27,6 +27,16 @@ * Profile results from a single {@link Driver}. */ public class DriverProfile implements Writeable, ChunkedToXContentObject { + /** + * Millis since epoch when the driver started. + */ + private final long startMillis; + + /** + * Millis since epoch when the driver stopped. + */ + private final long stopMillis; + /** * Nanos between creation and completion of the {@link Driver}. */ @@ -45,18 +55,38 @@ public class DriverProfile implements Writeable, ChunkedToXContentObject { private final long iterations; /** - * Status of each {@link Operator} in the driver when it finishes. + * Status of each {@link Operator} in the driver when it finished. 
*/ private final List operators; - public DriverProfile(long tookNanos, long cpuNanos, long iterations, List operators) { + private final DriverSleeps sleeps; + + public DriverProfile( + long startMillis, + long stopMillis, + long tookNanos, + long cpuNanos, + long iterations, + List operators, + DriverSleeps sleeps + ) { + this.startMillis = startMillis; + this.stopMillis = stopMillis; this.tookNanos = tookNanos; this.cpuNanos = cpuNanos; this.iterations = iterations; this.operators = operators; + this.sleeps = sleeps; } public DriverProfile(StreamInput in) throws IOException { + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_SLEEPS)) { + this.startMillis = in.readVLong(); + this.stopMillis = in.readVLong(); + } else { + this.startMillis = 0; + this.stopMillis = 0; + } if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { this.tookNanos = in.readVLong(); this.cpuNanos = in.readVLong(); @@ -67,16 +97,36 @@ public DriverProfile(StreamInput in) throws IOException { this.iterations = 0; } this.operators = in.readCollectionAsImmutableList(DriverStatus.OperatorStatus::new); + this.sleeps = DriverSleeps.read(in); } @Override public void writeTo(StreamOutput out) throws IOException { + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_SLEEPS)) { + out.writeVLong(startMillis); + out.writeVLong(stopMillis); + } if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) { out.writeVLong(tookNanos); out.writeVLong(cpuNanos); out.writeVLong(iterations); } out.writeCollection(operators); + sleeps.writeTo(out); + } + + /** + * Millis since epoch when the driver started. + */ + public long startMillis() { + return startMillis; + } + + /** + * Millis since epoch when the driver stopped. + */ + public long stopMillis() { + return stopMillis; } /** @@ -102,13 +152,25 @@ public long iterations() { return iterations; } + /** + * Status of each {@link Operator} in the driver when it finished. 
+ */ public List operators() { return operators; } + /** + * Records of the times the driver has slept. + */ + public DriverSleeps sleeps() { + return sleeps; + } + @Override public Iterator toXContentChunked(ToXContent.Params params) { return Iterators.concat(ChunkedToXContentHelper.startObject(), Iterators.single((b, p) -> { + b.timeField("start_millis", "start", startMillis); + b.timeField("stop_millis", "stop", stopMillis); b.field("took_nanos", tookNanos); if (b.humanReadable()) { b.field("took_time", TimeValue.timeValueNanos(tookNanos)); @@ -119,7 +181,11 @@ public Iterator toXContentChunked(ToXContent.Params params } b.field("iterations", iterations); return b; - }), ChunkedToXContentHelper.array("operators", operators.iterator()), ChunkedToXContentHelper.endObject()); + }), + ChunkedToXContentHelper.array("operators", operators.iterator()), + Iterators.single((b, p) -> b.field("sleeps", sleeps)), + ChunkedToXContentHelper.endObject() + ); } @Override @@ -131,15 +197,18 @@ public boolean equals(Object o) { return false; } DriverProfile that = (DriverProfile) o; - return tookNanos == that.tookNanos + return startMillis == that.startMillis + && stopMillis == that.stopMillis + && tookNanos == that.tookNanos && cpuNanos == that.cpuNanos && iterations == that.iterations - && Objects.equals(operators, that.operators); + && Objects.equals(operators, that.operators) + && sleeps.equals(that.sleeps); } @Override public int hashCode() { - return Objects.hash(tookNanos, cpuNanos, iterations, operators); + return Objects.hash(startMillis, stopMillis, tookNanos, cpuNanos, iterations, operators, sleeps); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverSleeps.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverSleeps.java new file mode 100644 index 0000000000000..217a0b033bed4 --- /dev/null +++ 
b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverSleeps.java @@ -0,0 +1,180 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +/** + * Records of the times the driver has slept. + * @param counts map from the reason the driver has slept to the number of times it slept for that reason + * @param first the first few times the driver slept + * @param last the last few times the driver slept + */ +public record DriverSleeps(Map counts, List first, List last) implements Writeable, ToXContentObject { + /** + * A record of a time the driver slept. 
+ * @param reason The reason the driver slept + * @param sleep Millis since epoch when the driver slept + * @param wake Millis since epoch when the driver woke, or 0 if it is currently sleeping + */ + public record Sleep(String reason, long sleep, long wake) implements Writeable, ToXContentObject { + Sleep(StreamInput in) throws IOException { + this(in.readString(), in.readLong(), in.readLong()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(reason); + out.writeLong(sleep); + out.writeLong(wake); + } + + Sleep wake(long now) { + if (isStillSleeping() == false) { + throw new IllegalStateException("Already awake."); + } + return new Sleep(reason, sleep, now); + } + + public boolean isStillSleeping() { + return wake == 0; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("reason", reason); + builder.timeField("sleep_millis", "sleep", sleep); + if (wake > 0) { + builder.timeField("wake_millis", "wake", wake); + } + return builder.endObject(); + } + } + + /** + * How many sleeps of the first and last sleeps and wakes to keep. + */ + static final int RECORDS = 10; + + public static DriverSleeps read(StreamInput in) throws IOException { + if (in.getTransportVersion().before(TransportVersions.ESQL_PROFILE_SLEEPS)) { + return empty(); + } + return new DriverSleeps( + in.readImmutableMap(StreamInput::readVLong), + in.readCollectionAsList(Sleep::new), + in.readCollectionAsList(Sleep::new) + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + if (out.getTransportVersion().before(TransportVersions.ESQL_PROFILE_SLEEPS)) { + return; + } + out.writeMap(counts, StreamOutput::writeVLong); + out.writeCollection(first); + out.writeCollection(last); + } + + public static DriverSleeps empty() { + return new DriverSleeps(Map.of(), List.of(), List.of()); + } + + /** + * Record a sleep. 
+ * @param reason the reason for the sleep + * @param now the current time + */ + public DriverSleeps sleep(String reason, long now) { + if (last.isEmpty() == false) { + Sleep lastLast = last.get(last.size() - 1); + if (lastLast.isStillSleeping()) { + throw new IllegalStateException("Still sleeping."); + } + } + Map newCounts = new TreeMap<>(counts); + newCounts.compute(reason, (k, v) -> v == null ? 1 : v + 1); + List newFirst = first.size() < RECORDS ? append(first, reason, now) : first; + List newLast = last.size() < RECORDS ? append(last, reason, now) : rollOnto(last, reason, now); + return new DriverSleeps(newCounts, newFirst, newLast); + } + + /** + * Record a wake. + * @param now the current time + */ + public DriverSleeps wake(long now) { + if (now == 0) { + throw new IllegalStateException("Can't wake at epoch. That's used to signal sleeping."); + } + if (last.isEmpty()) { + throw new IllegalStateException("Never slept."); + } + Sleep lastFirst = first.get(first.size() - 1); + List newFirst = lastFirst.wake == 0 ? 
wake(first, now) : first; + return new DriverSleeps(counts, newFirst, wake(last, now)); + } + + private List append(List old, String reason, long now) { + List sleeps = new ArrayList<>(old.size() + 1); + sleeps.addAll(old); + sleeps.add(new Sleep(reason, now, 0)); + return Collections.unmodifiableList(sleeps); + } + + private List rollOnto(List old, String reason, long now) { + List sleeps = new ArrayList<>(old.size()); + for (int i = 1; i < old.size(); i++) { + sleeps.add(old.get(i)); + } + sleeps.add(new Sleep(reason, now, 0)); + return Collections.unmodifiableList(sleeps); + } + + private List wake(List old, long now) { + List sleeps = new ArrayList<>(old); + sleeps.set(sleeps.size() - 1, old.get(old.size() - 1).wake(now)); + return Collections.unmodifiableList(sleeps); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startObject("counts"); + for (Map.Entry count : counts.entrySet()) { + builder.field(count.getKey(), count.getValue()); + } + builder.endObject(); + toXContent(builder, params, "first", first); + toXContent(builder, params, "last", last); + return builder.endObject(); + } + + private static void toXContent(XContentBuilder builder, ToXContent.Params params, String name, List sleeps) throws IOException { + builder.startArray(name); + for (Sleep sleep : sleeps) { + sleep.toXContent(builder, params); + } + builder.endArray(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverStatus.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverStatus.java index c7a0c7d4bacb9..42e3908231206 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverStatus.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverStatus.java @@ -79,6 +79,8 @@ public class DriverStatus implements Task.Status { */ private final 
List activeOperators; + private final DriverSleeps sleeps; + DriverStatus( String sessionId, long started, @@ -87,7 +89,8 @@ public class DriverStatus implements Task.Status { long iterations, Status status, List completedOperators, - List activeOperators + List activeOperators, + DriverSleeps sleeps ) { this.sessionId = sessionId; this.started = started; @@ -97,6 +100,7 @@ public class DriverStatus implements Task.Status { this.status = status; this.completedOperators = completedOperators; this.activeOperators = activeOperators; + this.sleeps = sleeps; } public DriverStatus(StreamInput in) throws IOException { @@ -105,13 +109,14 @@ public DriverStatus(StreamInput in) throws IOException { this.lastUpdated = in.readLong(); this.cpuNanos = in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) ? in.readVLong() : 0; this.iterations = in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) ? in.readVLong() : 0; - this.status = Status.valueOf(in.readString()); + this.status = Status.read(in); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { this.completedOperators = in.readCollectionAsImmutableList(OperatorStatus::new); } else { this.completedOperators = List.of(); } this.activeOperators = in.readCollectionAsImmutableList(OperatorStatus::new); + this.sleeps = DriverSleeps.read(in); } @Override @@ -125,11 +130,12 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(cpuNanos); out.writeVLong(iterations); } - out.writeString(status.toString()); + status.writeTo(out); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { out.writeCollection(completedOperators); } out.writeCollection(activeOperators); + sleeps.writeTo(out); } @Override @@ -188,6 +194,13 @@ public List completedOperators() { return completedOperators; } + /** + * Records of the times the driver has slept. + */ + public DriverSleeps sleeps() { + return sleeps; + } + /** * Status of each active {@link Operator} in the driver. 
*/ @@ -206,7 +219,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("cpu_time", TimeValue.timeValueNanos(cpuNanos)); } builder.field("iterations", iterations); - builder.field("status", status.toString().toLowerCase(Locale.ROOT)); + builder.field("status", status, params); builder.startArray("completed_operators"); for (OperatorStatus completed : completedOperators) { builder.value(completed); @@ -217,6 +230,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.value(active); } builder.endArray(); + builder.field("sleeps", sleeps, params); return builder.endObject(); } @@ -232,12 +246,13 @@ public boolean equals(Object o) { && iterations == that.iterations && status == that.status && completedOperators.equals(that.completedOperators) - && activeOperators.equals(that.activeOperators); + && activeOperators.equals(that.activeOperators) + && sleeps.equals(that.sleeps); } @Override public int hashCode() { - return Objects.hash(sessionId, started, lastUpdated, cpuNanos, iterations, status, completedOperators, activeOperators); + return Objects.hash(sessionId, started, lastUpdated, cpuNanos, iterations, status, completedOperators, activeOperators, sleeps); } @Override @@ -313,7 +328,7 @@ public String toString() { } } - public enum Status implements ToXContentFragment { + public enum Status implements Writeable, ToXContentFragment { QUEUED, STARTING, RUNNING, @@ -321,6 +336,15 @@ public enum Status implements ToXContentFragment { WAITING, DONE; + public static Status read(StreamInput in) throws IOException { + return Status.valueOf(in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(toString()); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { return builder.value(toString().toLowerCase(Locale.ROOT)); diff --git 
a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/IsBlockedResult.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/IsBlockedResult.java new file mode 100644 index 0000000000000..9e9c64dfbfed4 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/IsBlockedResult.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.action.support.SubscribableListener; + +import java.util.Map; + +/** + * Is this {@link Operator} blocked? + *

+ * If the {@link #listener}'s {@link SubscribableListener#isDone()} method + * returns {@code true} then the {@linkplain Operator} is not blocked. + *

+ *

+ * If the {@linkplain Operator} is blocked then you can + * {@link SubscribableListener#addListener} to the {@link #listener} to be + * notified when the {@linkplain Operator} is unblocked. + *

+ * @param listener a listener to check for blocked-ness + * @param reason the reason that the {@linkplain Operator} is blocked. + * This is used as a {@link Map} key so this shouldn't + * vary wildly, but it should be descriptive of the reason + * the operator went async. + */ +public record IsBlockedResult(SubscribableListener listener, String reason) {} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Operator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Operator.java index 1038277c39fe1..663e06756551b 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Operator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Operator.java @@ -88,11 +88,11 @@ default Status status() { * If the operator is not blocked, this method returns {@link #NOT_BLOCKED} which is an already * completed future. */ - default SubscribableListener isBlocked() { + default IsBlockedResult isBlocked() { return NOT_BLOCKED; } - SubscribableListener NOT_BLOCKED = SubscribableListener.newSucceeded(null); + IsBlockedResult NOT_BLOCKED = new IsBlockedResult(SubscribableListener.newSucceeded(null), "not blocked"); /** * A factory for creating intermediate operators. 
diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeBuffer.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeBuffer.java index df6c09ea1ff97..ce400ddbdd6f9 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeBuffer.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeBuffer.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.IsBlockedResult; import org.elasticsearch.compute.operator.Operator; import java.util.Queue; @@ -83,7 +84,7 @@ private void notifyNotFull() { } } - SubscribableListener waitForWriting() { + IsBlockedResult waitForWriting() { // maxBufferSize check is not water-tight as more than one sink can pass this check at the same time. 
if (queueSize.get() < maxSize || noMoreInputs) { return Operator.NOT_BLOCKED; @@ -95,11 +96,11 @@ SubscribableListener waitForWriting() { if (notFullFuture == null) { notFullFuture = new SubscribableListener<>(); } - return notFullFuture; + return new IsBlockedResult(notFullFuture, "exchange full"); } } - SubscribableListener waitForReading() { + IsBlockedResult waitForReading() { if (size() > 0 || noMoreInputs) { return Operator.NOT_BLOCKED; } @@ -110,7 +111,7 @@ SubscribableListener waitForReading() { if (notEmptyFuture == null) { notEmptyFuture = new SubscribableListener<>(); } - return notEmptyFuture; + return new IsBlockedResult(notEmptyFuture, "exchange empty"); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSink.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSink.java index 8f0208740b689..e96ca9e39b7e5 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSink.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSink.java @@ -7,8 +7,8 @@ package org.elasticsearch.compute.operator.exchange; -import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.IsBlockedResult; /** * Sink for exchanging data @@ -33,5 +33,5 @@ public interface ExchangeSink { /** * Whether the sink is blocked on adding more pages */ - SubscribableListener waitForWriting(); + IsBlockedResult waitForWriting(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java index ab155d6ee8479..757a3262433c8 100644 --- 
a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkHandler.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.IsBlockedResult; import java.util.Queue; import java.util.concurrent.ConcurrentLinkedQueue; @@ -81,7 +82,7 @@ public boolean isFinished() { } @Override - public SubscribableListener waitForWriting() { + public IsBlockedResult waitForWriting() { return buffer.waitForWriting(); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperator.java index 01354d681017a..dd89dfe480c36 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperator.java @@ -9,13 +9,13 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; -import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.IsBlockedResult; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.SinkOperator; import org.elasticsearch.xcontent.XContentBuilder; @@ -65,13 +65,13 @@ public void finish() { } 
@Override - public SubscribableListener isBlocked() { + public IsBlockedResult isBlocked() { return sink.waitForWriting(); } @Override public boolean needsInput() { - return isFinished() == false && isBlocked().isDone(); + return isFinished() == false && isBlocked().listener().isDone(); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSource.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSource.java index 01ed5e3fb6388..aa3374aa26d3f 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSource.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSource.java @@ -7,8 +7,8 @@ package org.elasticsearch.compute.operator.exchange; -import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.IsBlockedResult; /** * Source for exchanging data @@ -38,5 +38,5 @@ public interface ExchangeSource { /** * Allows callers to stop reading from the source when it's blocked */ - SubscribableListener waitForReading(); + IsBlockedResult waitForReading(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java index 77b535949eb9d..406dc4494208c 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceHandler.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.FailureCollector; +import 
org.elasticsearch.compute.operator.IsBlockedResult; import org.elasticsearch.core.Releasable; import java.util.List; @@ -70,7 +71,7 @@ public boolean isFinished() { } @Override - public SubscribableListener waitForReading() { + public IsBlockedResult waitForReading() { return buffer.waitForReading(); } @@ -178,13 +179,13 @@ void fetchPage() { if (resp.finished()) { onSinkComplete(); } else { - SubscribableListener future = buffer.waitForWriting(); - if (future.isDone()) { + IsBlockedResult future = buffer.waitForWriting(); + if (future.listener().isDone()) { if (loopControl.tryResume() == false) { fetchPage(); } } else { - future.addListener(ActionListener.wrap(unused -> { + future.listener().addListener(ActionListener.wrap(unused -> { if (loopControl.tryResume() == false) { fetchPage(); } @@ -198,7 +199,7 @@ void fetchPage() { void onSinkFailed(Exception e) { failure.unwrapAndCollect(e); - buffer.waitForReading().onResponse(null); // resume the Driver if it is being blocked on reading + buffer.waitForReading().listener().onResponse(null); // resume the Driver if it is being blocked on reading onSinkComplete(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceOperator.java index 1efba31bd831b..2d0ce228e81df 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeSourceOperator.java @@ -9,13 +9,13 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; -import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import 
org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.IsBlockedResult; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.SourceOperator; import org.elasticsearch.xcontent.XContentBuilder; @@ -30,7 +30,7 @@ public class ExchangeSourceOperator extends SourceOperator { private final ExchangeSource source; - private SubscribableListener isBlocked = NOT_BLOCKED; + private IsBlockedResult isBlocked = NOT_BLOCKED; private int pagesEmitted; public record ExchangeSourceOperatorFactory(Supplier exchangeSources) implements SourceOperatorFactory { @@ -70,10 +70,10 @@ public void finish() { } @Override - public SubscribableListener isBlocked() { - if (isBlocked.isDone()) { + public IsBlockedResult isBlocked() { + if (isBlocked.listener().isDone()) { isBlocked = source.waitForReading(); - if (isBlocked.isDone()) { + if (isBlocked.listener().isDone()) { isBlocked = NOT_BLOCKED; } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java index ae4558d5f8f71..fbcf11cd948c0 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java @@ -159,49 +159,56 @@ public void doClose() { Releasables.close(localBreaker); } - public void testStatus() { - BlockFactory blockFactory = blockFactory(); - DriverContext driverContext = new DriverContext(blockFactory.bigArrays(), blockFactory); + class TestOp extends AsyncOperator { Map> handlers = new HashMap<>(); - AsyncOperator operator = new AsyncOperator(driverContext, 2) { - @Override - protected void performAsync(Page inputPage, ActionListener listener) 
{ - handlers.put(inputPage, listener); - } - @Override - protected void doClose() { + TestOp(DriverContext driverContext, int maxOutstandingRequests) { + super(driverContext, maxOutstandingRequests); + } - } - }; - assertTrue(operator.isBlocked().isDone()); + @Override + protected void performAsync(Page inputPage, ActionListener listener) { + handlers.put(inputPage, listener); + } + + @Override + protected void doClose() { + + } + } + + public void testStatus() { + BlockFactory blockFactory = blockFactory(); + DriverContext driverContext = new DriverContext(blockFactory.bigArrays(), blockFactory); + TestOp operator = new TestOp(driverContext, 2); + assertTrue(operator.isBlocked().listener().isDone()); assertTrue(operator.needsInput()); Page page1 = new Page(driverContext.blockFactory().newConstantNullBlock(1)); operator.addInput(page1); - assertFalse(operator.isBlocked().isDone()); - SubscribableListener blocked1 = operator.isBlocked(); + assertFalse(operator.isBlocked().listener().isDone()); + SubscribableListener blocked1 = operator.isBlocked().listener(); assertTrue(operator.needsInput()); Page page2 = new Page(driverContext.blockFactory().newConstantNullBlock(2)); operator.addInput(page2); assertFalse(operator.needsInput()); // reached the max outstanding requests - assertFalse(operator.isBlocked().isDone()); - assertThat(operator.isBlocked(), equalTo(blocked1)); + assertFalse(operator.isBlocked().listener().isDone()); + assertThat(operator.isBlocked(), equalTo(new IsBlockedResult(blocked1, "TestOp"))); Page page3 = new Page(driverContext.blockFactory().newConstantNullBlock(3)); - handlers.remove(page1).onResponse(page3); + operator.handlers.remove(page1).onResponse(page3); page1.releaseBlocks(); assertFalse(operator.needsInput()); // still have 2 outstanding requests - assertTrue(operator.isBlocked().isDone()); + assertTrue(operator.isBlocked().listener().isDone()); assertTrue(blocked1.isDone()); assertThat(operator.getOutput(), equalTo(page3)); 
page3.releaseBlocks(); assertTrue(operator.needsInput()); - assertFalse(operator.isBlocked().isDone()); + assertFalse(operator.isBlocked().listener().isDone()); Page page4 = new Page(driverContext.blockFactory().newConstantNullBlock(3)); - handlers.remove(page2).onResponse(page4); + operator.handlers.remove(page2).onResponse(page4); page2.releaseBlocks(); assertThat(operator.getOutput(), equalTo(page4)); page4.releaseBlocks(); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverProfileTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverProfileTests.java index 86655bd3b7f73..27083ea0fcd13 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverProfileTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverProfileTests.java @@ -20,22 +20,34 @@ import java.io.IOException; import java.util.List; +import java.util.Map; import static org.hamcrest.Matchers.equalTo; public class DriverProfileTests extends AbstractWireSerializingTestCase { public void testToXContent() { DriverProfile status = new DriverProfile( + 123413220000L, + 123413243214L, 10012, 10000, 12, List.of( new DriverStatus.OperatorStatus("LuceneSource", LuceneSourceOperatorStatusTests.simple()), new DriverStatus.OperatorStatus("ValuesSourceReader", ValuesSourceReaderOperatorStatusTests.simple()) + ), + new DriverSleeps( + Map.of("driver time", 1L), + List.of(new DriverSleeps.Sleep("driver time", 1, 1)), + List.of(new DriverSleeps.Sleep("driver time", 1, 1)) ) ); assertThat(Strings.toString(status, true, true), equalTo(""" { + "start" : "1973-11-29T09:27:00.000Z", + "start_millis" : 123413220000, + "stop" : "1973-11-29T09:27:23.214Z", + "stop_millis" : 123413243214, "took_nanos" : 10012, "took_time" : "10micros", "cpu_nanos" : 10000, @@ -54,7 +66,30 @@ public void testToXContent() { """.stripTrailing() + " " + 
ValuesSourceReaderOperatorStatusTests.simpleToJson().replace("\n", "\n ") + """ } - ] + ], + "sleeps" : { + "counts" : { + "driver time" : 1 + }, + "first" : [ + { + "reason" : "driver time", + "sleep" : "1970-01-01T00:00:00.001Z", + "sleep_millis" : 1, + "wake" : "1970-01-01T00:00:00.001Z", + "wake_millis" : 1 + } + ], + "last" : [ + { + "reason" : "driver time", + "sleep" : "1970-01-01T00:00:00.001Z", + "sleep_millis" : 1, + "wake" : "1970-01-01T00:00:00.001Z", + "wake_millis" : 1 + } + ] + } }""")); } @@ -69,24 +104,33 @@ protected DriverProfile createTestInstance() { randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), - DriverStatusTests.randomOperatorStatuses() + randomNonNegativeLong(), + randomNonNegativeLong(), + DriverStatusTests.randomOperatorStatuses(), + DriverSleepsTests.randomDriverSleeps() ); } @Override protected DriverProfile mutateInstance(DriverProfile instance) throws IOException { + long startMillis = instance.startMillis(); + long stopMillis = instance.stopMillis(); long tookNanos = instance.tookNanos(); long cpuNanos = instance.cpuNanos(); long iterations = instance.iterations(); var operators = instance.operators(); - switch (between(0, 3)) { - case 0 -> tookNanos = randomValueOtherThan(tookNanos, ESTestCase::randomNonNegativeLong); - case 1 -> cpuNanos = randomValueOtherThan(cpuNanos, ESTestCase::randomNonNegativeLong); - case 2 -> iterations = randomValueOtherThan(iterations, ESTestCase::randomNonNegativeLong); - case 3 -> operators = randomValueOtherThan(operators, DriverStatusTests::randomOperatorStatuses); + var sleeps = instance.sleeps(); + switch (between(0, 6)) { + case 0 -> startMillis = randomValueOtherThan(startMillis, ESTestCase::randomNonNegativeLong); + case 1 -> stopMillis = randomValueOtherThan(startMillis, ESTestCase::randomNonNegativeLong); + case 2 -> tookNanos = randomValueOtherThan(tookNanos, ESTestCase::randomNonNegativeLong); + case 3 -> cpuNanos = randomValueOtherThan(cpuNanos, 
ESTestCase::randomNonNegativeLong); + case 4 -> iterations = randomValueOtherThan(iterations, ESTestCase::randomNonNegativeLong); + case 5 -> operators = randomValueOtherThan(operators, DriverStatusTests::randomOperatorStatuses); + case 6 -> sleeps = randomValueOtherThan(sleeps, DriverSleepsTests::randomDriverSleeps); default -> throw new UnsupportedOperationException(); } - return new DriverProfile(tookNanos, cpuNanos, iterations, operators); + return new DriverProfile(startMillis, stopMillis, tookNanos, cpuNanos, iterations, operators, sleeps); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverSleepsTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverSleepsTests.java new file mode 100644 index 0000000000000..a0d956fcd6f6f --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverSleepsTests.java @@ -0,0 +1,240 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +import static org.hamcrest.core.IsEqual.equalTo; + +public class DriverSleepsTests extends AbstractWireSerializingTestCase { + public static DriverSleeps randomDriverSleeps() { + return randomDriverSleeps(between(0, DriverSleeps.RECORDS * 3)); + } + + private static DriverSleeps randomDriverSleeps(int cycles) { + DriverSleeps sleeps = DriverSleeps.empty(); + long now = 0; + for (int i = 0; i < cycles; i++) { + now += between(1, 100000); + sleeps = sleeps.sleep(randomSleepReason(), now); + if (i != cycles - 1 || randomBoolean()) { + // Randomly don't wake on the last sleep + now += between(1, 100000); + sleeps = sleeps.wake(now); + } + } + return sleeps; + } + + private static String randomSleepReason() { + return randomFrom("driver time", "driver iteration", "exchange empty", "exchange full"); + } + + public void testEmptyToXContent() { + assertThat(Strings.toString(DriverSleeps.empty(), true, true), equalTo(""" + { + "counts" : { }, + "first" : [ ], + "last" : [ ] + }""")); + } + + public void testSleepingToXContent() { + assertThat(Strings.toString(DriverSleeps.empty().sleep("driver iterations", 1723555763000L), true, true), equalTo(""" + { + "counts" : { + "driver iterations" : 1 + }, + "first" : [ + { + "reason" : "driver iterations", + "sleep" : "2024-08-13T13:29:23.000Z", + "sleep_millis" : 1723555763000 + } + ], + "last" : [ + { + "reason" : "driver iterations", + "sleep" : "2024-08-13T13:29:23.000Z", + 
"sleep_millis" : 1723555763000 + } + ] + }""")); + } + + public void testWakingToXContent() { + assertThat( + Strings.toString(DriverSleeps.empty().sleep("driver iterations", 1723555763000L).wake(1723555863000L), true, true), + equalTo(""" + { + "counts" : { + "driver iterations" : 1 + }, + "first" : [ + { + "reason" : "driver iterations", + "sleep" : "2024-08-13T13:29:23.000Z", + "sleep_millis" : 1723555763000, + "wake" : "2024-08-13T13:31:03.000Z", + "wake_millis" : 1723555863000 + } + ], + "last" : [ + { + "reason" : "driver iterations", + "sleep" : "2024-08-13T13:29:23.000Z", + "sleep_millis" : 1723555763000, + "wake" : "2024-08-13T13:31:03.000Z", + "wake_millis" : 1723555863000 + } + ] + }""") + ); + } + + @Override + protected Writeable.Reader instanceReader() { + return DriverSleeps::read; + } + + @Override + protected DriverSleeps createTestInstance() { + return randomDriverSleeps(); + } + + @Override + protected DriverSleeps mutateInstance(DriverSleeps instance) throws IOException { + if (instance.last().isEmpty()) { + return instance.sleep(randomSleepReason(), between(1, 10000)); + } + DriverSleeps.Sleep last = instance.last().get(instance.last().size() - 1); + if (last.isStillSleeping()) { + return instance.wake(last.sleep() + between(1, 10000)); + } + return instance.sleep(randomSleepReason(), last.wake() + between(1, 10000)); + } + + public void testTracking() throws IOException { + long now = 0; + DriverSleeps sleeps = DriverSleeps.empty(); + + Map expectedCounts = new TreeMap<>(); + List expectedFirst = new ArrayList<>(); + assertThat(sleeps, equalTo(new DriverSleeps(expectedCounts, expectedFirst, expectedFirst))); + + /* + * Simulate sleeping and waking when the records aren't full. + * New sleeps and wakes should show up in both the "first" and "last" fields. + */ + for (int i = 0; i < DriverSleeps.RECORDS; i++) { + now++; + String reason = randomSleepReason(); + expectedCounts.compute(reason, (k, v) -> v == null ? 
1 : v + 1); + + sleeps = sleeps.sleep(reason, now); + expectedFirst.add(new DriverSleeps.Sleep(reason, now, 0)); + assertThat(sleeps, equalTo(new DriverSleeps(expectedCounts, expectedFirst, expectedFirst))); + assertXContent(sleeps, expectedCounts, expectedFirst, expectedFirst); + + now++; + sleeps = sleeps.wake(now); + expectedFirst.set(expectedFirst.size() - 1, new DriverSleeps.Sleep(reason, now - 1, now)); + assertThat(sleeps, equalTo(new DriverSleeps(expectedCounts, expectedFirst, expectedFirst))); + assertXContent(sleeps, expectedCounts, expectedFirst, expectedFirst); + } + + /* + * Simulate sleeping and waking when the records are full. + * New sleeps and wakes should show up in only the "last" field. + */ + List expectedLast = new ArrayList<>(expectedFirst); + for (int i = 0; i < 1000; i++) { + now++; + String reason = randomSleepReason(); + expectedCounts.compute(reason, (k, v) -> v == null ? 1 : v + 1); + + sleeps = sleeps.sleep(reason, now); + expectedLast.remove(0); + expectedLast.add(new DriverSleeps.Sleep(reason, now, 0)); + assertThat(sleeps, equalTo(new DriverSleeps(expectedCounts, expectedFirst, expectedLast))); + assertXContent(sleeps, expectedCounts, expectedFirst, expectedLast); + + now++; + sleeps = sleeps.wake(now); + expectedLast.set(expectedLast.size() - 1, new DriverSleeps.Sleep(reason, now - 1, now)); + assertThat(sleeps, equalTo(new DriverSleeps(expectedCounts, expectedFirst, expectedLast))); + assertXContent(sleeps, expectedCounts, expectedFirst, expectedLast); + } + } + + public void assertXContent( + DriverSleeps sleeps, + Map expectedCounts, + List expectedFirst, + List expectedLast + ) throws IOException { + try (BytesStreamOutput expected = new BytesStreamOutput()) { + try (XContentBuilder b = new XContentBuilder(XContentType.JSON.xContent(), expected).prettyPrint().humanReadable(true)) { + b.startObject(); + b.startObject("counts"); + { + for (Map.Entry e : expectedCounts.entrySet()) { + b.field(e.getKey(), e.getValue()); + } + } + 
b.endObject(); + { + b.startArray("first"); + for (DriverSleeps.Sleep sleep : expectedFirst) { + sleep.toXContent(b, ToXContent.EMPTY_PARAMS); + } + b.endArray(); + } + { + b.startArray("last"); + for (DriverSleeps.Sleep sleep : expectedLast) { + sleep.toXContent(b, ToXContent.EMPTY_PARAMS); + } + b.endArray(); + } + b.endObject(); + } + assertThat(Strings.toString(sleeps, true, true), equalTo(expected.bytes().utf8ToString())); + } + } + + public void testWakeNeverSlept() { + Exception e = expectThrows(IllegalStateException.class, () -> DriverSleeps.empty().wake(1)); + assertThat(e.getMessage(), equalTo("Never slept.")); + } + + public void testWakeWhileAwake() { + Exception e = expectThrows(IllegalStateException.class, () -> DriverSleeps.empty().sleep(randomSleepReason(), 1).wake(2).wake(3)); + assertThat(e.getMessage(), equalTo("Already awake.")); + } + + public void testSleepWhileSleeping() { + Exception e = expectThrows( + IllegalStateException.class, + () -> DriverSleeps.empty().sleep(randomSleepReason(), 1).sleep(randomSleepReason(), 2) + ); + assertThat(e.getMessage(), equalTo("Still sleeping.")); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverStatusTests.java index e82cbb831cff2..b46d9f3f4add7 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverStatusTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverStatusTests.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.util.List; +import java.util.Map; import java.util.function.Supplier; import static org.hamcrest.Matchers.equalTo; @@ -40,7 +41,12 @@ public void testToXContent() { new DriverStatus.OperatorStatus("LuceneSource", LuceneSourceOperatorStatusTests.simple()), new DriverStatus.OperatorStatus("ValuesSourceReader", 
ValuesSourceReaderOperatorStatusTests.simple()) ), - List.of(new DriverStatus.OperatorStatus("ExchangeSink", ExchangeSinkOperatorStatusTests.simple())) + List.of(new DriverStatus.OperatorStatus("ExchangeSink", ExchangeSinkOperatorStatusTests.simple())), + new DriverSleeps( + Map.of("driver time", 1L), + List.of(new DriverSleeps.Sleep("driver time", 1, 1)), + List.of(new DriverSleeps.Sleep("driver time", 1, 1)) + ) ); assertThat(Strings.toString(status, true, true), equalTo(""" { @@ -72,7 +78,30 @@ public void testToXContent() { """.stripTrailing() + " " + ExchangeSinkOperatorStatusTests.simpleToJson().replace("\n", "\n ") + """ } - ] + ], + "sleeps" : { + "counts" : { + "driver time" : 1 + }, + "first" : [ + { + "reason" : "driver time", + "sleep" : "1970-01-01T00:00:00.001Z", + "sleep_millis" : 1, + "wake" : "1970-01-01T00:00:00.001Z", + "wake_millis" : 1 + } + ], + "last" : [ + { + "reason" : "driver time", + "sleep" : "1970-01-01T00:00:00.001Z", + "sleep_millis" : 1, + "wake" : "1970-01-01T00:00:00.001Z", + "wake_millis" : 1 + } + ] + } }""")); } @@ -91,7 +120,8 @@ protected DriverStatus createTestInstance() { randomNonNegativeLong(), randomStatus(), randomOperatorStatuses(), - randomOperatorStatuses() + randomOperatorStatuses(), + DriverSleepsTests.randomDriverSleeps() ); } @@ -127,7 +157,8 @@ protected DriverStatus mutateInstance(DriverStatus instance) throws IOException var status = instance.status(); var completedOperators = instance.completedOperators(); var activeOperators = instance.activeOperators(); - switch (between(0, 7)) { + var sleeps = instance.sleeps(); + switch (between(0, 8)) { case 0 -> sessionId = randomValueOtherThan(sessionId, this::randomSessionId); case 1 -> started = randomValueOtherThan(started, ESTestCase::randomNonNegativeLong); case 2 -> lastUpdated = randomValueOtherThan(lastUpdated, ESTestCase::randomNonNegativeLong); @@ -136,9 +167,10 @@ protected DriverStatus mutateInstance(DriverStatus instance) throws IOException case 5 -> 
status = randomValueOtherThan(status, this::randomStatus); case 6 -> completedOperators = randomValueOtherThan(completedOperators, DriverStatusTests::randomOperatorStatuses); case 7 -> activeOperators = randomValueOtherThan(activeOperators, DriverStatusTests::randomOperatorStatuses); + case 8 -> sleeps = randomValueOtherThan(sleeps, DriverSleepsTests::randomDriverSleeps); default -> throw new UnsupportedOperationException(); } - return new DriverStatus(sessionId, started, lastUpdated, cpuNanos, iterations, status, completedOperators, activeOperators); + return new DriverStatus(sessionId, started, lastUpdated, cpuNanos, iterations, status, completedOperators, activeOperators, sleeps); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java index 3f958464656e0..ab785e739d080 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java @@ -99,40 +99,40 @@ public void testBasic() throws Exception { sourceExchanger.addCompletionListener(sourceCompletion); ExchangeSource source = sourceExchanger.createExchangeSource(); sourceExchanger.addRemoteSink(sinkExchanger::fetchPageAsync, 1); - SubscribableListener waitForReading = source.waitForReading(); + SubscribableListener waitForReading = source.waitForReading().listener(); assertFalse(waitForReading.isDone()); assertNull(source.pollPage()); - assertTrue(sink1.waitForWriting().isDone()); + assertTrue(sink1.waitForWriting().listener().isDone()); randomFrom(sink1, sink2).addPage(pages[0]); randomFrom(sink1, sink2).addPage(pages[1]); // source and sink buffers can store 5 pages for (Page p : List.of(pages[2], pages[3], pages[4])) { ExchangeSink sink = 
randomFrom(sink1, sink2); - assertBusy(() -> assertTrue(sink.waitForWriting().isDone())); + assertBusy(() -> assertTrue(sink.waitForWriting().listener().isDone())); sink.addPage(p); } // sink buffer is full - assertFalse(randomFrom(sink1, sink2).waitForWriting().isDone()); - assertBusy(() -> assertTrue(source.waitForReading().isDone())); + assertFalse(randomFrom(sink1, sink2).waitForWriting().listener().isDone()); + assertBusy(() -> assertTrue(source.waitForReading().listener().isDone())); assertEquals(pages[0], source.pollPage()); - assertBusy(() -> assertTrue(source.waitForReading().isDone())); + assertBusy(() -> assertTrue(source.waitForReading().listener().isDone())); assertEquals(pages[1], source.pollPage()); // sink can write again - assertBusy(() -> assertTrue(randomFrom(sink1, sink2).waitForWriting().isDone())); + assertBusy(() -> assertTrue(randomFrom(sink1, sink2).waitForWriting().listener().isDone())); randomFrom(sink1, sink2).addPage(pages[5]); - assertBusy(() -> assertTrue(randomFrom(sink1, sink2).waitForWriting().isDone())); + assertBusy(() -> assertTrue(randomFrom(sink1, sink2).waitForWriting().listener().isDone())); randomFrom(sink1, sink2).addPage(pages[6]); // sink buffer is full - assertFalse(randomFrom(sink1, sink2).waitForWriting().isDone()); + assertFalse(randomFrom(sink1, sink2).waitForWriting().listener().isDone()); sink1.finish(); assertTrue(sink1.isFinished()); for (int i = 0; i < 5; i++) { - assertBusy(() -> assertTrue(source.waitForReading().isDone())); + assertBusy(() -> assertTrue(source.waitForReading().listener().isDone())); assertEquals(pages[2 + i], source.pollPage()); } // source buffer is empty - assertFalse(source.waitForReading().isDone()); - assertBusy(() -> assertTrue(sink2.waitForWriting().isDone())); + assertFalse(source.waitForReading().listener().isDone()); + assertBusy(() -> assertTrue(sink2.waitForWriting().listener().isDone())); sink2.finish(); assertTrue(sink2.isFinished()); assertTrue(source.isFinished()); @@ -356,13 
+356,13 @@ public void testEarlyTerminate() { ExchangeSink sink = sinkExchanger.createExchangeSink(); sink.addPage(p1); sink.addPage(p2); - assertFalse(sink.waitForWriting().isDone()); + assertFalse(sink.waitForWriting().listener().isDone()); PlainActionFuture future = new PlainActionFuture<>(); sinkExchanger.fetchPageAsync(true, future); ExchangeResponse resp = future.actionGet(); assertTrue(resp.finished()); assertNull(resp.takePage()); - assertTrue(sink.waitForWriting().isDone()); + assertTrue(sink.waitForWriting().listener().isDone()); assertTrue(sink.isFinished()); } diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java index d679ee18d0a73..b0fa233965da6 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java @@ -13,6 +13,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.elasticsearch.Build; import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.settings.Settings; @@ -21,15 +22,19 @@ import org.elasticsearch.test.TestClustersThreadFilter; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.LogType; +import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase; import org.hamcrest.Matchers; +import org.junit.Assert; import org.junit.ClassRule; import java.io.IOException; import java.io.InputStream; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; 
import java.util.List; +import java.util.Locale; import java.util.Map; import static org.elasticsearch.test.ListMatcher.matchesList; @@ -285,15 +290,11 @@ public void testProfile() throws IOException { .entry("profile", matchesMap().entry("drivers", instanceOf(List.class))) ); - MapMatcher commonProfile = matchesMap().entry("iterations", greaterThan(0)) - .entry("cpu_nanos", greaterThan(0)) - .entry("took_nanos", greaterThan(0)) - .entry("operators", instanceOf(List.class)); List> signatures = new ArrayList<>(); @SuppressWarnings("unchecked") List> profiles = (List>) ((Map) result.get("profile")).get("drivers"); for (Map p : profiles) { - assertThat(p, commonProfile); + assertThat(p, commonProfile()); List sig = new ArrayList<>(); @SuppressWarnings("unchecked") List> operators = (List>) p.get("operators"); @@ -348,15 +349,11 @@ public void testInlineStatsProfile() throws IOException { ).entry("values", values).entry("profile", matchesMap().entry("drivers", instanceOf(List.class))) ); - MapMatcher commonProfile = matchesMap().entry("iterations", greaterThan(0)) - .entry("cpu_nanos", greaterThan(0)) - .entry("took_nanos", greaterThan(0)) - .entry("operators", instanceOf(List.class)); List> signatures = new ArrayList<>(); @SuppressWarnings("unchecked") List> profiles = (List>) ((Map) result.get("profile")).get("drivers"); for (Map p : profiles) { - assertThat(p, commonProfile); + assertThat(p, commonProfile()); List sig = new ArrayList<>(); @SuppressWarnings("unchecked") List> operators = (List>) p.get("operators"); @@ -398,6 +395,115 @@ public void testInlineStatsProfile() throws IOException { ); } + public void testForceSleepsProfile() throws IOException { + assumeTrue("requires pragmas", Build.current().isSnapshot()); + + Request createIndex = new Request("PUT", testIndexName()); + createIndex.setJsonEntity(""" + { + "settings": { + "index": { + "number_of_shards": 1 + } + } + }"""); + Response response = client().performRequest(createIndex); + assertThat( + 
entityToMap(response.getEntity(), XContentType.JSON), + matchesMap().entry("shards_acknowledged", true).entry("index", testIndexName()).entry("acknowledged", true) + ); + + int groupCount = 300; + for (int group1 = 0; group1 < groupCount; group1++) { + StringBuilder b = new StringBuilder(); + for (int group2 = 0; group2 < groupCount; group2++) { + b.append(String.format(Locale.ROOT, """ + {"create":{"_index":"%s"}} + {"@timestamp":"2020-12-12","value":1,"group1":%d,"group2":%d} + """, testIndexName(), group1, group2)); + } + Request bulk = new Request("POST", "/_bulk"); + bulk.addParameter("refresh", "true"); + bulk.addParameter("filter_path", "errors"); + bulk.setJsonEntity(b.toString()); + response = client().performRequest(bulk); + Assert.assertEquals("{\"errors\":false}", EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8)); + } + + RequestObjectBuilder builder = requestObjectBuilder().query( + fromIndex() + " | STATS AVG(value), MAX(value), MIN(value) BY group1, group2 | SORT group1, group2 ASC | LIMIT 10" + ); + // Lock to shard level partitioning, so we get consistent profile output + builder.pragmas(Settings.builder().put("data_partitioning", "shard").put("page_size", 10).build()); + builder.profile(true); + Map result = runEsql(builder); + List> expectedValues = new ArrayList<>(); + for (int group2 = 0; group2 < 10; group2++) { + expectedValues.add(List.of(1.0, 1, 1, 0, group2)); + } + assertMap( + result, + matchesMap().entry( + "columns", + matchesList().item(matchesMap().entry("name", "AVG(value)").entry("type", "double")) + .item(matchesMap().entry("name", "MAX(value)").entry("type", "long")) + .item(matchesMap().entry("name", "MIN(value)").entry("type", "long")) + .item(matchesMap().entry("name", "group1").entry("type", "long")) + .item(matchesMap().entry("name", "group2").entry("type", "long")) + ).entry("values", expectedValues).entry("profile", matchesMap().entry("drivers", instanceOf(List.class))) + ); + + 
@SuppressWarnings("unchecked") + List> profiles = (List>) ((Map) result.get("profile")).get("drivers"); + + for (Map p : profiles) { + assertMap(p, commonProfile()); + @SuppressWarnings("unchecked") + Map sleeps = (Map) p.get("sleeps"); + String operators = p.get("operators").toString(); + MapMatcher sleepMatcher = matchesMap().entry("reason", "exchange empty") + .entry("sleep_millis", greaterThan(0L)) + .entry("wake_millis", greaterThan(0L)); + if (operators.contains("LuceneSourceOperator")) { + assertMap(sleeps, matchesMap().entry("counts", Map.of()).entry("first", List.of()).entry("last", List.of())); + } else if (operators.contains("ExchangeSourceOperator")) { + if (operators.contains("ExchangeSinkOperator")) { + assertMap(sleeps, matchesMap().entry("counts", matchesMap().entry("exchange empty", greaterThan(0))).extraOk()); + @SuppressWarnings("unchecked") + List> first = (List>) sleeps.get("first"); + for (Map s : first) { + assertMap(s, sleepMatcher); + } + @SuppressWarnings("unchecked") + List> last = (List>) sleeps.get("last"); + for (Map s : last) { + assertMap(s, sleepMatcher); + } + + } else { + assertMap( + sleeps, + matchesMap().entry("counts", matchesMap().entry("exchange empty", 1)) + .entry("first", List.of(sleepMatcher)) + .entry("last", List.of(sleepMatcher)) + ); + } + } else { + fail("unknown signature: " + operators); + } + } + } + + private MapMatcher commonProfile() { + return matchesMap().entry("start_millis", greaterThan(0L)) + .entry("stop_millis", greaterThan(0L)) + .entry("iterations", greaterThan(0)) + .entry("cpu_nanos", greaterThan(0)) + .entry("took_nanos", greaterThan(0)) + .entry("operators", instanceOf(List.class)) + .entry("sleeps", matchesMap().extraOk()); + } + private String checkOperatorProfile(Map o) { String name = (String) o.get("operator"); name = name.replaceAll("\\[.+", ""); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseProfileTests.java 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseProfileTests.java index 782e1fb4333d8..2f3aa09868637 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseProfileTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseProfileTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.operator.AbstractPageMappingOperator; import org.elasticsearch.compute.operator.DriverProfile; +import org.elasticsearch.compute.operator.DriverSleeps; import org.elasticsearch.compute.operator.DriverStatus; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.test.AbstractWireSerializingTestCase; @@ -51,7 +52,10 @@ private DriverProfile randomDriverProfile() { randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), - randomList(10, this::randomOperatorStatus) + randomNonNegativeLong(), + randomNonNegativeLong(), + randomList(10, this::randomOperatorStatus), + DriverSleeps.empty() ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java index e7f539026498b..9d4a1c21c5995 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.AbstractPageMappingOperator; import org.elasticsearch.compute.operator.DriverProfile; +import org.elasticsearch.compute.operator.DriverSleeps; import org.elasticsearch.compute.operator.DriverStatus; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasables; @@ -479,10 +480,13 @@ public void testProfileXContent() 
{ new EsqlQueryResponse.Profile( List.of( new DriverProfile( + 1723489812649L, + 1723489819929L, 20021, 20000, 12, - List.of(new DriverStatus.OperatorStatus("asdf", new AbstractPageMappingOperator.Status(10021, 10))) + List.of(new DriverStatus.OperatorStatus("asdf", new AbstractPageMappingOperator.Status(10021, 10))), + DriverSleeps.empty() ) ) ), @@ -509,6 +513,8 @@ public void testProfileXContent() { "profile" : { "drivers" : [ { + "start_millis" : 1723489812649, + "stop_millis" : 1723489819929, "took_nanos" : 20021, "cpu_nanos" : 20000, "iterations" : 12, @@ -520,7 +526,12 @@ public void testProfileXContent() { "pages_processed" : 10 } } - ] + ], + "sleeps" : { + "counts" : { }, + "first" : [ ], + "last" : [ ] + } } ] } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java index c93f3b9e0e350..26529a3605d38 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ComputeListenerTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.compute.operator.DriverProfile; +import org.elasticsearch.compute.operator.DriverSleeps; import org.elasticsearch.core.TimeValue; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.TaskCancellationService; @@ -92,7 +93,17 @@ private ComputeResponse randomResponse() { int numProfiles = randomIntBetween(0, 2); List profiles = new ArrayList<>(numProfiles); for (int i = 0; i < numProfiles; i++) { - profiles.add(new DriverProfile(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), List.of())); + profiles.add( + new DriverProfile( + randomNonNegativeLong(), + randomNonNegativeLong(), + 
randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + List.of(), + DriverSleeps.empty() + ) + ); } return new ComputeResponse(profiles); } From 69293e28dc6d3237796ada6d12c75c84c73a1a29 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 19 Aug 2024 16:31:59 -0700 Subject: [PATCH 12/20] Use systemd socket directly instead of libsystemd (#111131) The libsystemd library function sd_notify is just a thin wrapper around opeing and writing to a unix filesystem socket. This commit replaces using libsystemd with opening the socket provided by systemd directly. relates #86475 --- .../nativeaccess/jna/JnaPosixCLibrary.java | 41 ++++++++++ .../nativeaccess/LinuxNativeAccess.java | 11 ++- .../elasticsearch/nativeaccess/Systemd.java | 81 ++++++++++++++++--- .../nativeaccess/lib/PosixCLibrary.java | 59 +++++++++++++- .../nativeaccess/jdk/JdkPosixCLibrary.java | 64 +++++++++++++++ .../nativeaccess/jdk/MemorySegmentUtil.java | 4 + .../nativeaccess/jdk/MemorySegmentUtil.java | 4 + 7 files changed, 248 insertions(+), 16 deletions(-) diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java index d984d239e0b39..82a69e4864d94 100644 --- a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaPosixCLibrary.java @@ -16,6 +16,7 @@ import com.sun.jna.Pointer; import com.sun.jna.Structure; +import org.elasticsearch.nativeaccess.CloseableByteBuffer; import org.elasticsearch.nativeaccess.lib.PosixCLibrary; import java.util.Arrays; @@ -109,6 +110,16 @@ public long bytesalloc() { } } + public static class JnaSockAddr implements SockAddr { + final Memory memory; + + JnaSockAddr(String path) { + this.memory = new Memory(110); + memory.setShort(0, AF_UNIX); + memory.setString(2, path, "UTF-8"); + } + } + private interface 
NativeFunctions extends Library { int geteuid(); @@ -126,6 +137,12 @@ private interface NativeFunctions extends Library { int close(int fd); + int socket(int domain, int type, int protocol); + + int connect(int sockfd, Pointer addr, int addrlen); + + long send(int sockfd, Pointer buf, long buflen, int flags); + String strerror(int errno); } @@ -235,6 +252,30 @@ public int fstat64(int fd, Stat64 stats) { return fstat64.fstat64(fd, jnaStats.memory); } + @Override + public int socket(int domain, int type, int protocol) { + return functions.socket(domain, type, protocol); + } + + @Override + public SockAddr newUnixSockAddr(String path) { + return new JnaSockAddr(path); + } + + @Override + public int connect(int sockfd, SockAddr addr) { + assert addr instanceof JnaSockAddr; + var jnaAddr = (JnaSockAddr) addr; + return functions.connect(sockfd, jnaAddr.memory, (int) jnaAddr.memory.size()); + } + + @Override + public long send(int sockfd, CloseableByteBuffer buffer, int flags) { + assert buffer instanceof JnaCloseableByteBuffer; + var nativeBuffer = (JnaCloseableByteBuffer) buffer; + return functions.send(sockfd, nativeBuffer.memory, nativeBuffer.buffer().remaining(), flags); + } + @Override public String strerror(int errno) { return functions.strerror(errno); diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java index f6e6035a8aba6..e1ea28e8786f5 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/LinuxNativeAccess.java @@ -12,7 +12,7 @@ import org.elasticsearch.nativeaccess.lib.LinuxCLibrary.SockFProg; import org.elasticsearch.nativeaccess.lib.LinuxCLibrary.SockFilter; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; -import org.elasticsearch.nativeaccess.lib.SystemdLibrary; +import org.elasticsearch.nativeaccess.lib.PosixCLibrary; 
import java.util.Map; @@ -92,7 +92,14 @@ record Arch( LinuxNativeAccess(NativeLibraryProvider libraryProvider) { super("Linux", libraryProvider, new PosixConstants(-1L, 9, 1, 8, 64, 144, 48, 64)); this.linuxLibc = libraryProvider.getLibrary(LinuxCLibrary.class); - this.systemd = new Systemd(libraryProvider.getLibrary(SystemdLibrary.class)); + String socketPath = System.getenv("NOTIFY_SOCKET"); + if (socketPath == null) { + this.systemd = null; // not running under systemd + } else { + logger.debug("Systemd socket path: {}", socketPath); + var buffer = newBuffer(64); + this.systemd = new Systemd(libraryProvider.getLibrary(PosixCLibrary.class), socketPath, buffer); + } } @Override diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/Systemd.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/Systemd.java index 4deade118b788..058cfe77b1ff3 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/Systemd.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/Systemd.java @@ -10,17 +10,28 @@ import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; -import org.elasticsearch.nativeaccess.lib.SystemdLibrary; +import org.elasticsearch.nativeaccess.lib.PosixCLibrary; -import java.util.Locale; +import java.nio.charset.StandardCharsets; +/** + * Wraps access to notifications to systemd. + *

+ * Systemd notifications are done through a Unix socket. Although Java does support + * opening unix sockets, it unfortunately does not support datagram sockets. This class + * instead opens and communicates with the socket using native methods. + */ public class Systemd { private static final Logger logger = LogManager.getLogger(Systemd.class); - private final SystemdLibrary lib; + private final PosixCLibrary libc; + private final String socketPath; + private final CloseableByteBuffer buffer; - Systemd(SystemdLibrary lib) { - this.lib = lib; + Systemd(PosixCLibrary libc, String socketPath, CloseableByteBuffer buffer) { + this.libc = libc; + this.socketPath = socketPath; + this.buffer = buffer; } /** @@ -41,15 +52,61 @@ public void notify_stopping() { } private void notify(String state, boolean warnOnError) { - int rc = lib.sd_notify(0, state); - logger.trace("sd_notify({}, {}) returned [{}]", 0, state, rc); - if (rc < 0) { - String message = String.format(Locale.ROOT, "sd_notify(%d, %s) returned error [%d]", 0, state, rc); - if (warnOnError) { - logger.warn(message); + int sockfd = libc.socket(PosixCLibrary.AF_UNIX, PosixCLibrary.SOCK_DGRAM, 0); + if (sockfd < 0) { + throwOrLog("Could not open systemd socket: " + libc.strerror(libc.errno()), warnOnError); + return; + } + RuntimeException error = null; + try { + var sockAddr = libc.newUnixSockAddr(socketPath); + if (libc.connect(sockfd, sockAddr) != 0) { + throwOrLog("Could not connect to systemd socket: " + libc.strerror(libc.errno()), warnOnError); + return; + } + + byte[] bytes = state.getBytes(StandardCharsets.US_ASCII); + final long bytesSent; + synchronized (buffer) { + buffer.buffer().clear(); + buffer.buffer().put(0, bytes); + buffer.buffer().limit(bytes.length); + bytesSent = libc.send(sockfd, buffer, 0); + } + + if (bytesSent == -1) { + throwOrLog("Failed to send message (" + state + ") to systemd socket: " + libc.strerror(libc.errno()), warnOnError); + } else if (bytesSent != bytes.length) { + 
throwOrLog("Not all bytes of message (" + state + ") sent to systemd socket (sent " + bytesSent + ")", warnOnError); } else { - throw new RuntimeException(message); + logger.trace("Message (" + state + ") sent to systemd"); + } + } catch (RuntimeException e) { + error = e; + } finally { + if (libc.close(sockfd) != 0) { + try { + throwOrLog("Could not close systemd socket: " + libc.strerror(libc.errno()), warnOnError); + } catch (RuntimeException e) { + if (error != null) { + error.addSuppressed(e); + throw error; + } else { + throw e; + } + } + } else if (error != null) { + throw error; } } } + + private void throwOrLog(String message, boolean warnOnError) { + if (warnOnError) { + logger.warn(message); + } else { + logger.error(message); + throw new RuntimeException(message); + } + } } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java index 0e7d07d0ad623..ac34fcb23b3eb 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java @@ -8,11 +8,19 @@ package org.elasticsearch.nativeaccess.lib; +import org.elasticsearch.nativeaccess.CloseableByteBuffer; + /** * Provides access to methods in libc.so available on POSIX systems. */ public non-sealed interface PosixCLibrary extends NativeLibrary { + /** socket domain indicating unix file socket */ + short AF_UNIX = 1; + + /** socket type indicating a datagram-oriented socket */ + int SOCK_DGRAM = 2; + /** * Gets the effective userid of the current process. * @@ -68,8 +76,6 @@ interface Stat64 { int open(String pathname, int flags); - int close(int fd); - int fstat64(int fd, Stat64 stats); int ftruncate(int fd, long length); @@ -90,6 +96,55 @@ interface FStore { int fcntl(int fd, int cmd, FStore fst); + /** + * Open a file descriptor to connect to a socket. 
+ * + * @param domain The socket protocol family, eg AF_UNIX + * @param type The socket type, eg SOCK_DGRAM + * @param protocol The protocol for the given protocl family, normally 0 + * @return an open file descriptor, or -1 on failure with errno set + * @see socket manpage + */ + int socket(int domain, int type, int protocol); + + /** + * Marker interface for sockaddr struct implementations. + */ + interface SockAddr {} + + /** + * Create a sockaddr for the AF_UNIX family. + */ + SockAddr newUnixSockAddr(String path); + + /** + * Connect a socket to an address. + * + * @param sockfd An open socket file descriptor + * @param addr The address to connect to + * @return 0 on success, -1 on failure with errno set + */ + int connect(int sockfd, SockAddr addr); + + /** + * Send a message to a socket. + * + * @param sockfd The open socket file descriptor + * @param buffer The message bytes to send + * @param flags Flags that may adjust how the message is sent + * @return The number of bytes sent, or -1 on failure with errno set + * @see send manpage + */ + long send(int sockfd, CloseableByteBuffer buffer, int flags); + + /** + * Close a file descriptor + * @param fd The file descriptor to close + * @return 0 on success, -1 on failure with errno set + * @see close manpage + */ + int close(int fd); + /** * Return a string description for an error. 
* diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java index 7affd0614461d..f5e3132b76b56 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkPosixCLibrary.java @@ -10,6 +10,7 @@ import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; +import org.elasticsearch.nativeaccess.CloseableByteBuffer; import org.elasticsearch.nativeaccess.lib.PosixCLibrary; import java.lang.foreign.Arena; @@ -24,8 +25,10 @@ import static java.lang.foreign.MemoryLayout.PathElement.groupElement; import static java.lang.foreign.ValueLayout.ADDRESS; +import static java.lang.foreign.ValueLayout.JAVA_BYTE; import static java.lang.foreign.ValueLayout.JAVA_INT; import static java.lang.foreign.ValueLayout.JAVA_LONG; +import static java.lang.foreign.ValueLayout.JAVA_SHORT; import static org.elasticsearch.nativeaccess.jdk.LinkerHelper.downcallHandle; import static org.elasticsearch.nativeaccess.jdk.MemorySegmentUtil.varHandleWithoutOffset; @@ -89,6 +92,18 @@ class JdkPosixCLibrary implements PosixCLibrary { } fstat$mh = fstat; } + private static final MethodHandle socket$mh = downcallHandleWithErrno( + "socket", + FunctionDescriptor.of(JAVA_INT, JAVA_INT, JAVA_INT, JAVA_INT) + ); + private static final MethodHandle connect$mh = downcallHandleWithErrno( + "connect", + FunctionDescriptor.of(JAVA_INT, JAVA_INT, ADDRESS, JAVA_INT) + ); + private static final MethodHandle send$mh = downcallHandleWithErrno( + "send", + FunctionDescriptor.of(JAVA_LONG, JAVA_INT, ADDRESS, JAVA_LONG, JAVA_INT) + ); static final MemorySegment errnoState = Arena.ofAuto().allocate(CAPTURE_ERRNO_LAYOUT); @@ -226,6 +241,44 @@ public int fstat64(int fd, Stat64 stat64) { } } + @Override + public int socket(int domain, int type, int protocol) { + try { + 
return (int) socket$mh.invokeExact(errnoState, domain, type, protocol); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public SockAddr newUnixSockAddr(String path) { + return new JdkSockAddr(path); + } + + @Override + public int connect(int sockfd, SockAddr addr) { + assert addr instanceof JdkSockAddr; + var jdkAddr = (JdkSockAddr) addr; + try { + return (int) connect$mh.invokeExact(errnoState, sockfd, jdkAddr.segment, (int) jdkAddr.segment.byteSize()); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + + @Override + public long send(int sockfd, CloseableByteBuffer buffer, int flags) { + assert buffer instanceof JdkCloseableByteBuffer; + var nativeBuffer = (JdkCloseableByteBuffer) buffer; + var segment = nativeBuffer.segment; + try { + logger.info("Sending {} bytes to socket", buffer.buffer().remaining()); + return (long) send$mh.invokeExact(errnoState, sockfd, segment, (long) buffer.buffer().remaining(), flags); + } catch (Throwable t) { + throw new AssertionError(t); + } + } + static class JdkRLimit implements RLimit { private static final MemoryLayout layout = MemoryLayout.structLayout(JAVA_LONG, JAVA_LONG); private static final VarHandle rlim_cur$vh = varHandleWithoutOffset(layout, groupElement(0)); @@ -326,4 +379,15 @@ public long bytesalloc() { return (long) st_bytesalloc$vh.get(segment); } } + + private static class JdkSockAddr implements SockAddr { + private static final MemoryLayout layout = MemoryLayout.structLayout(JAVA_SHORT, MemoryLayout.sequenceLayout(108, JAVA_BYTE)); + final MemorySegment segment; + + JdkSockAddr(String path) { + segment = Arena.ofAuto().allocate(layout); + segment.set(JAVA_SHORT, 0, AF_UNIX); + MemorySegmentUtil.setString(segment, 2, path); + } + } } diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java index c65711af0f63f..6c4c9bd0111c0 100644 --- 
a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java @@ -22,6 +22,10 @@ static String getString(MemorySegment segment, long offset) { return segment.getUtf8String(offset); } + static void setString(MemorySegment segment, long offset, String value) { + segment.setUtf8String(offset, value); + } + static MemorySegment allocateString(Arena arena, String s) { return arena.allocateUtf8String(s); } diff --git a/libs/native/src/main22/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java b/libs/native/src/main22/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java index 25c449337e294..23d9919603ab4 100644 --- a/libs/native/src/main22/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java +++ b/libs/native/src/main22/java/org/elasticsearch/nativeaccess/jdk/MemorySegmentUtil.java @@ -20,6 +20,10 @@ static String getString(MemorySegment segment, long offset) { return segment.getString(offset); } + static void setString(MemorySegment segment, long offset, String value) { + segment.setString(offset, value); + } + static MemorySegment allocateString(Arena arena, String s) { return arena.allocateFrom(s); } From d03c197df257f867d58f72c9b8f872f31ab133be Mon Sep 17 00:00:00 2001 From: elasticsearchmachine Date: Tue, 20 Aug 2024 06:11:26 +0000 Subject: [PATCH 13/20] [Automated] Update Lucene snapshot to 9.12.0-snapshot-25253a1a016 --- build-tools-internal/version.properties | 2 +- gradle/verification-metadata.xml | 150 ++++++++++++------------ 2 files changed, 76 insertions(+), 76 deletions(-) diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index 99a135480b97b..f108b9f7fc4d7 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -1,5 +1,5 @@ elasticsearch = 8.16.0 -lucene = 9.12.0-snapshot-a9a70fa97cc +lucene = 9.12.0-snapshot-25253a1a016 
bundled_jdk_vendor = openjdk bundled_jdk = 22.0.1+8@c7ec1332f7bb44aeba2eb341ae18aca4 diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 2d030e02c265e..de1db00b952b7 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -2816,129 +2816,129 @@ - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + From 1047453b1a287046407eb8c7c84bba85c2312beb Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 20 Aug 2024 07:24:21 +0100 Subject: [PATCH 14/20] Improve interrupt handling in tests (#111957) The test utilities `waitUntil()`, `indexRandom()`, `startInParallel()` and `runInParallel()` all declare `InterruptedException` amongst the checked exceptions they throw, but in practice there's nothing useful to do with such an exception except to fail the test. With this change we handle the interrupt within the utility methods instead, avoiding exception-handling noise in callers. 
--- .../node/tasks/CancellableTasksTests.java | 14 ++- .../cluster/node/tasks/TestTaskPlugin.java | 20 ++--- .../elasticsearch/test/ESIntegTestCase.java | 48 +++++------ .../org/elasticsearch/test/ESTestCase.java | 41 +++++---- .../test/InternalTestCluster.java | 6 +- .../SearchableSnapshotsIntegTests.java | 8 +- .../SessionFactoryLoadBalancingTests.java | 85 +++++++++---------- 7 files changed, 100 insertions(+), 122 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java index e541fef65a0f9..64b9b4f0b69d8 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java @@ -158,15 +158,11 @@ protected NodeResponse nodeOperation(CancellableNodeRequest request, Task task) if (shouldBlock) { // Simulate a job that takes forever to finish // Using periodic checks method to identify that the task was cancelled - try { - waitUntil(() -> { - ((CancellableTask) task).ensureNotCancelled(); - return false; - }); - fail("It should have thrown an exception"); - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - } + waitUntil(() -> { + ((CancellableTask) task).ensureNotCancelled(); + return false; + }); + fail("It should have thrown an exception"); } debugDelay("op4"); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java index 16392b3f59baa..903ecfe2b2aa7 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java @@ -283,16 +283,12 @@ protected void 
doExecute(Task task, NodesRequest request, ActionListener { - if (((CancellableTask) task).isCancelled()) { - throw new RuntimeException("Cancelled!"); - } - return ((TestTask) task).isBlocked() == false; - }); - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - } + waitUntil(() -> { + if (((CancellableTask) task).isCancelled()) { + throw new RuntimeException("Cancelled!"); + } + return ((TestTask) task).isBlocked() == false; + }); } logger.info("Test task finished on the node {}", clusterService.localNode()); return new NodeResponse(clusterService.localNode()); @@ -301,9 +297,7 @@ protected NodeResponse nodeOperation(NodeRequest request, Task task) { public static class UnblockTestTaskResponse implements Writeable { - UnblockTestTaskResponse() { - - } + UnblockTestTaskResponse() {} UnblockTestTaskResponse(StreamInput in) {} diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index fa686a0bc753a..cf469546b6f63 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -1192,23 +1192,19 @@ public static List> findTasks(Cl @Nullable public static DiscoveryNode waitAndGetHealthNode(InternalTestCluster internalCluster) { DiscoveryNode[] healthNode = new DiscoveryNode[1]; - try { - waitUntil(() -> { - ClusterState state = internalCluster.client() - .admin() - .cluster() - .prepareState() - .clear() - .setMetadata(true) - .setNodes(true) - .get() - .getState(); - healthNode[0] = HealthNode.findHealthNode(state); - return healthNode[0] != null; - }, 15, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } + waitUntil(() -> { + ClusterState state = internalCluster.client() + .admin() + .cluster() + .prepareState() + .clear() + .setMetadata(true) + .setNodes(true) + .get() + .getState(); + 
healthNode[0] = HealthNode.findHealthNode(state); + return healthNode[0] != null; + }, 15, TimeUnit.SECONDS); return healthNode[0]; } @@ -1640,7 +1636,7 @@ protected static IndicesAdminClient indicesAdmin() { return admin().indices(); } - public void indexRandom(boolean forceRefresh, String index, int numDocs) throws InterruptedException { + public void indexRandom(boolean forceRefresh, String index, int numDocs) { IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < builders.length; i++) { builders[i] = prepareIndex(index).setSource("field", "value"); @@ -1651,11 +1647,11 @@ public void indexRandom(boolean forceRefresh, String index, int numDocs) throws /** * Convenience method that forwards to {@link #indexRandom(boolean, List)}. */ - public void indexRandom(boolean forceRefresh, IndexRequestBuilder... builders) throws InterruptedException { + public void indexRandom(boolean forceRefresh, IndexRequestBuilder... builders) { indexRandom(forceRefresh, Arrays.asList(builders)); } - public void indexRandom(boolean forceRefresh, boolean dummyDocuments, IndexRequestBuilder... builders) throws InterruptedException { + public void indexRandom(boolean forceRefresh, boolean dummyDocuments, IndexRequestBuilder... builders) { indexRandom(forceRefresh, dummyDocuments, Arrays.asList(builders)); } @@ -1674,7 +1670,7 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, IndexReque * @param builders the documents to index. * @see #indexRandom(boolean, boolean, java.util.List) */ - public void indexRandom(boolean forceRefresh, List builders) throws InterruptedException { + public void indexRandom(boolean forceRefresh, List builders) { indexRandom(forceRefresh, forceRefresh, builders); } @@ -1690,7 +1686,7 @@ public void indexRandom(boolean forceRefresh, List builders * all documents are indexed. This is useful to produce deleted documents on the server side. * @param builders the documents to index. 
*/ - public void indexRandom(boolean forceRefresh, boolean dummyDocuments, List builders) throws InterruptedException { + public void indexRandom(boolean forceRefresh, boolean dummyDocuments, List builders) { indexRandom(forceRefresh, dummyDocuments, true, builders); } @@ -1707,8 +1703,7 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, List builders) - throws InterruptedException { + public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean maybeFlush, List builders) { Random random = random(); Set indices = new HashSet<>(); builders = new ArrayList<>(builders); @@ -1822,8 +1817,7 @@ private static CountDownLatch newLatch(List latches) { /** * Maybe refresh, force merge, or flush then always make sure there aren't too many in flight async operations. */ - private void postIndexAsyncActions(String[] indices, List inFlightAsyncOperations, boolean maybeFlush) - throws InterruptedException { + private void postIndexAsyncActions(String[] indices, List inFlightAsyncOperations, boolean maybeFlush) { if (rarely()) { if (rarely()) { indicesAdmin().prepareRefresh(indices) @@ -1843,7 +1837,7 @@ private void postIndexAsyncActions(String[] indices, List inFlig } while (inFlightAsyncOperations.size() > MAX_IN_FLIGHT_ASYNC_INDEXES) { int waitFor = between(0, inFlightAsyncOperations.size() - 1); - inFlightAsyncOperations.remove(waitFor).await(); + safeAwait(inFlightAsyncOperations.remove(waitFor)); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 08709ff6459ce..58487d6552bcd 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -213,6 +213,7 @@ import static org.hamcrest.Matchers.emptyCollectionOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; +import static 
org.hamcrest.Matchers.in; import static org.hamcrest.Matchers.startsWith; /** @@ -1420,9 +1421,8 @@ public static void assertBusy(CheckedRunnable codeBlock, long maxWait * * @param breakSupplier determines whether to return immediately or continue waiting. * @return the last value returned by breakSupplier - * @throws InterruptedException if any sleep calls were interrupted. */ - public static boolean waitUntil(BooleanSupplier breakSupplier) throws InterruptedException { + public static boolean waitUntil(BooleanSupplier breakSupplier) { return waitUntil(breakSupplier, 10, TimeUnit.SECONDS); } @@ -1438,9 +1438,8 @@ public static boolean waitUntil(BooleanSupplier breakSupplier) throws Interrupte * @param maxWaitTime the maximum amount of time to wait * @param unit the unit of tie for maxWaitTime * @return the last value returned by breakSupplier - * @throws InterruptedException if any sleep calls were interrupted. */ - public static boolean waitUntil(BooleanSupplier breakSupplier, long maxWaitTime, TimeUnit unit) throws InterruptedException { + public static boolean waitUntil(BooleanSupplier breakSupplier, long maxWaitTime, TimeUnit unit) { long maxTimeInMillis = TimeUnit.MILLISECONDS.convert(maxWaitTime, unit); long timeInMillis = 1; long sum = 0; @@ -1448,12 +1447,12 @@ public static boolean waitUntil(BooleanSupplier breakSupplier, long maxWaitTime, if (breakSupplier.getAsBoolean()) { return true; } - Thread.sleep(timeInMillis); + safeSleep(timeInMillis); sum += timeInMillis; timeInMillis = Math.min(AWAIT_BUSY_THRESHOLD, timeInMillis * 2); } timeInMillis = maxTimeInMillis - sum; - Thread.sleep(Math.max(timeInMillis, 0)); + safeSleep(Math.max(timeInMillis, 0)); return breakSupplier.getAsBoolean(); } @@ -2505,7 +2504,7 @@ public static T expectThrows(Class expectedType, Reques * Same as {@link #runInParallel(int, IntConsumer)} but also attempts to start all tasks at the same time by blocking execution on a * barrier until all threads are started and ready to execute 
their task. */ - public static void startInParallel(int numberOfTasks, IntConsumer taskFactory) throws InterruptedException { + public static void startInParallel(int numberOfTasks, IntConsumer taskFactory) { final CyclicBarrier barrier = new CyclicBarrier(numberOfTasks); runInParallel(numberOfTasks, i -> { safeAwait(barrier); @@ -2519,7 +2518,7 @@ public static void startInParallel(int numberOfTasks, IntConsumer taskFactory) t * @param numberOfTasks number of tasks to run in parallel * @param taskFactory task factory */ - public static void runInParallel(int numberOfTasks, IntConsumer taskFactory) throws InterruptedException { + public static void runInParallel(int numberOfTasks, IntConsumer taskFactory) { final ArrayList> futures = new ArrayList<>(numberOfTasks); final Thread[] threads = new Thread[numberOfTasks - 1]; for (int i = 0; i < numberOfTasks; i++) { @@ -2534,16 +2533,26 @@ public static void runInParallel(int numberOfTasks, IntConsumer taskFactory) thr threads[i].start(); } } - for (Thread thread : threads) { - thread.join(); - } Exception e = null; - for (Future future : futures) { - try { - future.get(); - } catch (Exception ex) { - e = ExceptionsHelper.useOrSuppress(e, ex); + try { + for (Thread thread : threads) { + // no sense in waiting for the rest of the threads, nor any futures, if interrupted, just bail out and fail + thread.join(); + } + for (Future future : futures) { + try { + future.get(); + } catch (InterruptedException interruptedException) { + // no sense in waiting for the rest of the futures if interrupted, just bail out and fail + Thread.currentThread().interrupt(); + throw interruptedException; + } catch (Exception executionException) { + e = ExceptionsHelper.useOrSuppress(e, executionException); + } } + } catch (InterruptedException interruptedException) { + Thread.currentThread().interrupt(); + e = ExceptionsHelper.useOrSuppress(e, interruptedException); } if (e != null) { throw new AssertionError(e); diff --git 
a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 0b69245177c7a..332df7123fd1b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -1744,11 +1744,7 @@ private synchronized void startAndPublishNodesAndClients(List nod .filter(nac -> nodes.containsKey(nac.name) == false) // filter out old masters .count(); rebuildUnicastHostFiles(nodeAndClients); // ensure that new nodes can find the existing nodes when they start - try { - runInParallel(nodeAndClients.size(), i -> nodeAndClients.get(i).startNode()); - } catch (InterruptedException e) { - throw new AssertionError("interrupted while starting nodes", e); - } + runInParallel(nodeAndClients.size(), i -> nodeAndClients.get(i).startNode()); nodeAndClients.forEach(this::publishNode); if (autoManageMasterNodes && newMasters > 0) { diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java index 56aec13cbab29..c99f2be0a6cad 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java @@ -371,12 +371,8 @@ public void testCanMountSnapshotTakenWhileConcurrentlyIndexing() throws Exceptio for (int i = between(10, 10_000); i >= 0; i--) { indexRequestBuilders.add(prepareIndex(indexName).setSource("foo", randomBoolean() ? 
"bar" : "baz")); } - try { - safeAwait(cyclicBarrier); - indexRandom(true, true, indexRequestBuilders); - } catch (InterruptedException e) { - throw new AssertionError(e); - } + safeAwait(cyclicBarrier); + indexRandom(true, true, indexRequestBuilders); refresh(indexName); assertThat( indicesAdmin().prepareForceMerge(indexName).setOnlyExpungeDeletes(true).setFlush(true).get().getFailedShards(), diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryLoadBalancingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryLoadBalancingTests.java index 466d0e3428d50..6abf6c81b673e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryLoadBalancingTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryLoadBalancingTests.java @@ -401,59 +401,52 @@ private PortBlockingRunnable( public void run() { final List openedSockets = new ArrayList<>(); final List failedAddresses = new ArrayList<>(); - try { - final boolean allSocketsOpened = waitUntil(() -> { - try { - final InetAddress[] allAddresses; - if (serverAddress instanceof Inet4Address) { - allAddresses = NetworkUtils.getAllIPV4Addresses(); - } else { - allAddresses = NetworkUtils.getAllIPV6Addresses(); - } - final List inetAddressesToBind = Arrays.stream(allAddresses) - .filter(addr -> openedSockets.stream().noneMatch(s -> addr.equals(s.getLocalAddress()))) - .filter(addr -> failedAddresses.contains(addr) == false) - .collect(Collectors.toList()); - for (InetAddress localAddress : inetAddressesToBind) { - try { - final Socket socket = openMockSocket(serverAddress, serverPort, localAddress, portToBind); - openedSockets.add(socket); - logger.debug("opened socket [{}]", socket); - } catch (NoRouteToHostException | ConnectException e) { - logger.debug(() -> "marking address [" + 
localAddress + "] as failed due to:", e); - failedAddresses.add(localAddress); - } - } - if (openedSockets.size() == 0) { - logger.debug("Could not open any sockets from the available addresses"); - return false; + + final boolean allSocketsOpened = waitUntil(() -> { + try { + final InetAddress[] allAddresses; + if (serverAddress instanceof Inet4Address) { + allAddresses = NetworkUtils.getAllIPV4Addresses(); + } else { + allAddresses = NetworkUtils.getAllIPV6Addresses(); + } + final List inetAddressesToBind = Arrays.stream(allAddresses) + .filter(addr -> openedSockets.stream().noneMatch(s -> addr.equals(s.getLocalAddress()))) + .filter(addr -> failedAddresses.contains(addr) == false) + .collect(Collectors.toList()); + for (InetAddress localAddress : inetAddressesToBind) { + try { + final Socket socket = openMockSocket(serverAddress, serverPort, localAddress, portToBind); + openedSockets.add(socket); + logger.debug("opened socket [{}]", socket); + } catch (NoRouteToHostException | ConnectException e) { + logger.debug(() -> "marking address [" + localAddress + "] as failed due to:", e); + failedAddresses.add(localAddress); } - return true; - } catch (IOException e) { - logger.debug(() -> "caught exception while opening socket on [" + portToBind + "]", e); + } + if (openedSockets.size() == 0) { + logger.debug("Could not open any sockets from the available addresses"); return false; } - }); - - if (allSocketsOpened) { - latch.countDown(); - } else { - success.set(false); - IOUtils.closeWhileHandlingException(openedSockets); - openedSockets.clear(); - latch.countDown(); - return; + return true; + } catch (IOException e) { + logger.debug(() -> "caught exception while opening socket on [" + portToBind + "]", e); + return false; } - } catch (InterruptedException e) { - logger.debug(() -> "interrupted while trying to open sockets on [" + portToBind + "]", e); - Thread.currentThread().interrupt(); + }); + + if (allSocketsOpened) { + latch.countDown(); + } else { + 
success.set(false); + IOUtils.closeWhileHandlingException(openedSockets); + openedSockets.clear(); + latch.countDown(); + return; } try { - closeLatch.await(); - } catch (InterruptedException e) { - logger.debug("caught exception while waiting for close latch", e); - Thread.currentThread().interrupt(); + safeAwait(closeLatch); } finally { logger.debug("closing sockets on [{}]", portToBind); IOUtils.closeWhileHandlingException(openedSockets); From fa58a9d08d9696b0a19ce10cb44bba8cf752a5fb Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 20 Aug 2024 07:25:55 +0100 Subject: [PATCH 15/20] Add known issue docs for #111854 (#111978) --- docs/reference/api-conventions.asciidoc | 1 + docs/reference/release-notes/8.15.0.asciidoc | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index 25881b707d724..f8d925945401e 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -334,6 +334,7 @@ All REST API parameters (both request parameters and JSON body) support providing boolean "false" as the value `false` and boolean "true" as the value `true`. All other values will raise an error. +[[api-conventions-number-values]] [discrete] === Number Values diff --git a/docs/reference/release-notes/8.15.0.asciidoc b/docs/reference/release-notes/8.15.0.asciidoc index e2314381a4b06..2069c1bd96ff0 100644 --- a/docs/reference/release-notes/8.15.0.asciidoc +++ b/docs/reference/release-notes/8.15.0.asciidoc @@ -22,6 +22,10 @@ Either downgrade to an earlier version, upgrade to 8.15.1, or else follow the recommendation in the manual to entirely disable swap instead of using the memory lock feature (issue: {es-issue}111847[#111847]) +* The `took` field of the response to the <> API is incorrect and may be rather large. Clients which +<> assume that this value will be within a particular range (e.g. 
that it fits into a 32-bit +signed integer) may encounter errors (issue: {es-issue}111854[#111854]) + [[breaking-8.15.0]] [float] === Breaking changes From c80b79678935ea62af676b7431fedb0af9bcb7ba Mon Sep 17 00:00:00 2001 From: Andrei Stefan Date: Tue, 20 Aug 2024 09:37:02 +0300 Subject: [PATCH 16/20] ESQL: don't lose the original casting error message (#111968) --- docs/changelog/111968.yaml | 6 ++++++ .../xpack/esql/analysis/Analyzer.java | 3 +++ .../xpack/esql/analysis/VerifierTests.java | 20 +++++++++++++++++++ 3 files changed, 29 insertions(+) create mode 100644 docs/changelog/111968.yaml diff --git a/docs/changelog/111968.yaml b/docs/changelog/111968.yaml new file mode 100644 index 0000000000000..9d758c76369e9 --- /dev/null +++ b/docs/changelog/111968.yaml @@ -0,0 +1,6 @@ +pr: 111968 +summary: "ESQL: don't lose the original casting error message" +area: ES|QL +type: bug +issues: + - 111967 diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 4a7120a1d3d92..4a116fd102cd0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -856,6 +856,9 @@ private static List potentialCandidatesIfNoMatchesFound( Collection attrList, java.util.function.Function, String> messageProducer ) { + if (ua.customMessage()) { + return List.of(); + } // none found - add error message if (matches.isEmpty()) { Set names = new HashSet<>(attrList.size()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index 9b0c32b8ade2e..ab216e10b674c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -255,10 +255,30 @@ public void testRoundFunctionInvalidInputs() { "1:31: second argument of [round(a, 3.5)] must be [integer], found value [3.5] type [double]", error("row a = 1, b = \"c\" | eval x = round(a, 3.5)") ); + } + + public void testImplicitCastingErrorMessages() { assertEquals( "1:23: Cannot convert string [c] to [INTEGER], error [Cannot parse number [c]]", error("row a = round(123.45, \"c\")") ); + assertEquals( + "1:27: Cannot convert string [c] to [DOUBLE], error [Cannot parse number [c]]", + error("row a = 1 | eval x = acos(\"c\")") + ); + assertEquals( + "1:33: Cannot convert string [c] to [DOUBLE], error [Cannot parse number [c]]\n" + + "line 1:38: Cannot convert string [a] to [INTEGER], error [Cannot parse number [a]]", + error("row a = 1 | eval x = round(acos(\"c\"),\"a\")") + ); + assertEquals( + "1:63: Cannot convert string [x] to [INTEGER], error [Cannot parse number [x]]", + error("row ip4 = to_ip(\"1.2.3.4\") | eval ip4_prefix = ip_prefix(ip4, \"x\", 0)") + ); + assertEquals( + "1:42: Cannot convert string [a] to [DOUBLE], error [Cannot parse number [a]]", + error("ROW a=[3, 5, 1, 6] | EVAL avg_a = MV_AVG(\"a\")") + ); } public void testAggsExpressionsInStatsAggs() { From ad90d1f0f62499c4ce1e31915db6cd6cc750106f Mon Sep 17 00:00:00 2001 From: Mary Gouseti Date: Tue, 20 Aug 2024 09:54:55 +0300 Subject: [PATCH 17/20] Introduce global retention in data stream lifecycle (cluster settings) (#111972) In this PR we introduce cluster settings to manage the global data stream retention. We introduce two settings `data_streams.lifecycle.retention.max` & `data_streams.lifecycle.retention.default` that configure the respective retentions. The settings are loaded and monitored by the `DataStreamGlobalRetentionSettings`. The validation has also moved there. We preserved the `DataStreamGlobalRetention` record to reduce the impact of this change. 
The purpose of this method is to be simply a wrapper record that groups the retention settings together. Temporarily, the `DataStreamGlobalRetentionSettings` is using the DataStreamFactoryRetention which is marked as deprecated for migration purposes. --- docs/changelog/111972.yaml | 15 ++ .../data-stream-lifecycle-settings.asciidoc | 12 ++ .../datastreams/DataStreamsPlugin.java | 2 +- .../action/GetDataStreamsTransportAction.java | 14 +- .../lifecycle/DataStreamLifecycleService.java | 12 +- ...sportExplainDataStreamLifecycleAction.java | 10 +- ...TransportGetDataStreamLifecycleAction.java | 10 +- .../MetadataIndexTemplateServiceTests.java | 7 +- .../GetDataStreamsTransportActionTests.java | 45 ++--- .../DataStreamLifecycleServiceTests.java | 9 +- .../metadata/DataStreamFactoryRetention.java | 2 + .../metadata/DataStreamGlobalRetention.java | 6 +- .../DataStreamGlobalRetentionProvider.java | 34 ---- .../DataStreamGlobalRetentionSettings.java | 180 ++++++++++++++++++ .../metadata/MetadataDataStreamsService.java | 8 +- .../MetadataIndexTemplateService.java | 12 +- .../common/settings/ClusterSettings.java | 5 +- .../elasticsearch/node/NodeConstruction.java | 27 +-- .../org/elasticsearch/plugins/Plugin.java | 6 +- ...vedComposableIndexTemplateActionTests.java | 14 +- ...ataStreamGlobalRetentionProviderTests.java | 58 ------ ...ataStreamGlobalRetentionSettingsTests.java | 141 ++++++++++++++ ...amLifecycleWithRetentionWarningsTests.java | 40 ++-- .../MetadataDataStreamsServiceTests.java | 6 +- .../MetadataIndexTemplateServiceTests.java | 14 +- 25 files changed, 476 insertions(+), 213 deletions(-) create mode 100644 docs/changelog/111972.yaml delete mode 100644 server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionProvider.java create mode 100644 server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSettings.java delete mode 100644 
server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionProviderTests.java create mode 100644 server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSettingsTests.java diff --git a/docs/changelog/111972.yaml b/docs/changelog/111972.yaml new file mode 100644 index 0000000000000..58477c68f0e7c --- /dev/null +++ b/docs/changelog/111972.yaml @@ -0,0 +1,15 @@ +pr: 111972 +summary: Introduce global retention in data stream lifecycle. +area: Data streams +type: feature +issues: [] +highlight: + title: Add global retention in data stream lifecycle + body: "Data stream lifecycle now supports configuring retention on a cluster level,\ + \ namely global retention. Global retention \nallows us to configure two different\ + \ retentions:\n\n- `data_streams.lifecycle.retention.default` is applied to all\ + \ data streams managed by the data stream lifecycle that do not have retention\n\ + defined on the data stream level.\n- `data_streams.lifecycle.retention.max` is\ + \ applied to all data streams managed by the data stream lifecycle and it allows\ + \ any data stream \ndata to be deleted after the `max_retention` has passed." + notable: true diff --git a/docs/reference/settings/data-stream-lifecycle-settings.asciidoc b/docs/reference/settings/data-stream-lifecycle-settings.asciidoc index 0f00e956472d0..4b055525d4e6c 100644 --- a/docs/reference/settings/data-stream-lifecycle-settings.asciidoc +++ b/docs/reference/settings/data-stream-lifecycle-settings.asciidoc @@ -10,6 +10,18 @@ These are the settings available for configuring <>, <>) +The maximum retention period that will apply to all user data streams managed by the data stream lifecycle. The max retention will also +override the retention of a data stream whose configured retention exceeds the max retention. It should be greater than `10s`. 
+ +[[data-streams-lifecycle-retention-default]] +`data_streams.lifecycle.retention.default`:: +(<>, <>) +The retention period that will apply to all user data streams managed by the data stream lifecycle that do not have retention configured. +It should be greater than `10s` and less or equals than <>. + [[data-streams-lifecycle-poll-interval]] `data_streams.lifecycle.poll_interval`:: (<>, <>) diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java index cd233e29dee0e..615c0006a4ce6 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java @@ -201,7 +201,7 @@ public Collection createComponents(PluginServices services) { errorStoreInitialisationService.get(), services.allocationService(), dataStreamLifecycleErrorsPublisher.get(), - services.dataStreamGlobalRetentionProvider() + services.dataStreamGlobalRetentionSettings() ) ); dataLifecycleInitialisationService.get().init(); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java index b32ba361963e5..dcca32355082b 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java @@ -21,7 +21,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.health.ClusterStateHealth; import org.elasticsearch.cluster.metadata.DataStream; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import 
org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -57,7 +57,7 @@ public class GetDataStreamsTransportAction extends TransportMasterNodeReadAction private static final Logger LOGGER = LogManager.getLogger(GetDataStreamsTransportAction.class); private final SystemIndices systemIndices; private final ClusterSettings clusterSettings; - private final DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProvider; + private final DataStreamGlobalRetentionSettings globalRetentionSettings; @Inject public GetDataStreamsTransportAction( @@ -67,7 +67,7 @@ public GetDataStreamsTransportAction( ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, SystemIndices systemIndices, - DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProvider + DataStreamGlobalRetentionSettings globalRetentionSettings ) { super( GetDataStreamAction.NAME, @@ -81,7 +81,7 @@ public GetDataStreamsTransportAction( EsExecutors.DIRECT_EXECUTOR_SERVICE ); this.systemIndices = systemIndices; - this.dataStreamGlobalRetentionProvider = dataStreamGlobalRetentionProvider; + this.globalRetentionSettings = globalRetentionSettings; clusterSettings = clusterService.getClusterSettings(); } @@ -93,7 +93,7 @@ protected void masterOperation( ActionListener listener ) throws Exception { listener.onResponse( - innerOperation(state, request, indexNameExpressionResolver, systemIndices, clusterSettings, dataStreamGlobalRetentionProvider) + innerOperation(state, request, indexNameExpressionResolver, systemIndices, clusterSettings, globalRetentionSettings) ); } @@ -103,7 +103,7 @@ static GetDataStreamAction.Response innerOperation( IndexNameExpressionResolver indexNameExpressionResolver, SystemIndices systemIndices, ClusterSettings clusterSettings, - 
DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProvider + DataStreamGlobalRetentionSettings globalRetentionSettings ) { List dataStreams = getDataStreams(state, indexNameExpressionResolver, request); List dataStreamInfos = new ArrayList<>(dataStreams.size()); @@ -223,7 +223,7 @@ public int compareTo(IndexInfo o) { return new GetDataStreamAction.Response( dataStreamInfos, request.includeDefaults() ? clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING) : null, - dataStreamGlobalRetentionProvider.provide() + globalRetentionSettings.get() ); } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index 9e1b01ef47a88..0cb29dbcf5b2f 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -44,7 +44,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -162,7 +162,7 @@ public class DataStreamLifecycleService implements ClusterStateListener, Closeab final ResultDeduplicator transportActionsDeduplicator; final ResultDeduplicator clusterStateChangesDeduplicator; private final DataStreamLifecycleHealthInfoPublisher dslHealthInfoPublisher; - private final DataStreamGlobalRetentionProvider globalRetentionResolver; + private 
final DataStreamGlobalRetentionSettings globalRetentionSettings; private LongSupplier nowSupplier; private final Clock clock; private final DataStreamLifecycleErrorStore errorStore; @@ -211,7 +211,7 @@ public DataStreamLifecycleService( DataStreamLifecycleErrorStore errorStore, AllocationService allocationService, DataStreamLifecycleHealthInfoPublisher dataStreamLifecycleHealthInfoPublisher, - DataStreamGlobalRetentionProvider globalRetentionResolver + DataStreamGlobalRetentionSettings globalRetentionSettings ) { this.settings = settings; this.client = client; @@ -222,7 +222,7 @@ public DataStreamLifecycleService( this.clusterStateChangesDeduplicator = new ResultDeduplicator<>(threadPool.getThreadContext()); this.nowSupplier = nowSupplier; this.errorStore = errorStore; - this.globalRetentionResolver = globalRetentionResolver; + this.globalRetentionSettings = globalRetentionSettings; this.scheduledJob = null; this.pollInterval = DATA_STREAM_LIFECYCLE_POLL_INTERVAL_SETTING.get(settings); this.targetMergePolicyFloorSegment = DATA_STREAM_MERGE_POLICY_TARGET_FLOOR_SEGMENT_SETTING.get(settings); @@ -819,7 +819,7 @@ private Index maybeExecuteRollover(ClusterState state, DataStream dataStream, bo RolloverRequest rolloverRequest = getDefaultRolloverRequest( rolloverConfiguration, dataStream.getName(), - dataStream.getLifecycle().getEffectiveDataRetention(dataStream.isSystem() ? null : globalRetentionResolver.provide()), + dataStream.getLifecycle().getEffectiveDataRetention(dataStream.isSystem() ? null : globalRetentionSettings.get()), rolloverFailureStore ); transportActionsDeduplicator.executeOnce( @@ -871,7 +871,7 @@ private Index maybeExecuteRollover(ClusterState state, DataStream dataStream, bo */ Set maybeExecuteRetention(ClusterState state, DataStream dataStream, Set indicesToExcludeForRemainingRun) { Metadata metadata = state.metadata(); - DataStreamGlobalRetention globalRetention = dataStream.isSystem() ? 
null : globalRetentionResolver.provide(); + DataStreamGlobalRetention globalRetention = dataStream.isSystem() ? null : globalRetentionSettings.get(); List backingIndicesOlderThanRetention = dataStream.getIndicesPastRetention(metadata::index, nowSupplier, globalRetention); if (backingIndicesOlderThanRetention.isEmpty()) { return Set.of(); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportExplainDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportExplainDataStreamLifecycleAction.java index 408bc3b239f23..855b1713e5ec2 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportExplainDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportExplainDataStreamLifecycleAction.java @@ -18,7 +18,7 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.DataStream; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -44,7 +44,7 @@ public class TransportExplainDataStreamLifecycleAction extends TransportMasterNo ExplainDataStreamLifecycleAction.Response> { private final DataStreamLifecycleErrorStore errorStore; - private final DataStreamGlobalRetentionProvider globalRetentionResolver; + private final DataStreamGlobalRetentionSettings globalRetentionSettings; @Inject public TransportExplainDataStreamLifecycleAction( @@ -54,7 +54,7 @@ public TransportExplainDataStreamLifecycleAction( ActionFilters actionFilters, IndexNameExpressionResolver 
indexNameExpressionResolver, DataStreamLifecycleErrorStore dataLifecycleServiceErrorStore, - DataStreamGlobalRetentionProvider globalRetentionResolver + DataStreamGlobalRetentionSettings globalRetentionSettings ) { super( ExplainDataStreamLifecycleAction.INSTANCE.name(), @@ -68,7 +68,7 @@ public TransportExplainDataStreamLifecycleAction( threadPool.executor(ThreadPool.Names.MANAGEMENT) ); this.errorStore = dataLifecycleServiceErrorStore; - this.globalRetentionResolver = globalRetentionResolver; + this.globalRetentionSettings = globalRetentionSettings; } @Override @@ -118,7 +118,7 @@ protected void masterOperation( new ExplainDataStreamLifecycleAction.Response( explainIndices, request.includeDefaults() ? clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING) : null, - globalRetentionResolver.provide() + globalRetentionSettings.get() ) ); } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleAction.java index 3def1351dd5e8..452295aab0ce9 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/TransportGetDataStreamLifecycleAction.java @@ -16,7 +16,7 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.DataStream; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; @@ -40,7 
+40,7 @@ public class TransportGetDataStreamLifecycleAction extends TransportMasterNodeRe GetDataStreamLifecycleAction.Request, GetDataStreamLifecycleAction.Response> { private final ClusterSettings clusterSettings; - private final DataStreamGlobalRetentionProvider globalRetentionResolver; + private final DataStreamGlobalRetentionSettings globalRetentionSettings; @Inject public TransportGetDataStreamLifecycleAction( @@ -49,7 +49,7 @@ public TransportGetDataStreamLifecycleAction( ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - DataStreamGlobalRetentionProvider globalRetentionResolver + DataStreamGlobalRetentionSettings globalRetentionSettings ) { super( GetDataStreamLifecycleAction.INSTANCE.name(), @@ -63,7 +63,7 @@ public TransportGetDataStreamLifecycleAction( EsExecutors.DIRECT_EXECUTOR_SERVICE ); clusterSettings = clusterService.getClusterSettings(); - this.globalRetentionResolver = globalRetentionResolver; + this.globalRetentionSettings = globalRetentionSettings; } @Override @@ -96,7 +96,7 @@ protected void masterOperation( .sorted(Comparator.comparing(GetDataStreamLifecycleAction.Response.DataStreamLifecycle::dataStreamName)) .toList(), request.includeDefaults() ? 
clusterSettings.get(DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING) : null, - globalRetentionResolver.provide() + globalRetentionSettings.get() ) ); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java index b61b70f55c734..d5356e371f497 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStreamFactoryRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; @@ -216,7 +216,10 @@ private MetadataIndexTemplateService getMetadataIndexTemplateService() { xContentRegistry(), EmptySystemIndices.INSTANCE, indexSettingProviders, - new DataStreamGlobalRetentionProvider(DataStreamFactoryRetention.emptyFactoryRetention()) + DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), + DataStreamFactoryRetention.emptyFactoryRetention() + ) ); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportActionTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportActionTests.java index cd3f862a51ddf..80d867ec7745e 100644 --- 
a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportActionTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportActionTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamFactoryRetention; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamTestHelper; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; @@ -45,7 +45,8 @@ public class GetDataStreamsTransportActionTests extends ESTestCase { private final IndexNameExpressionResolver resolver = TestIndexNameExpressionResolver.newInstance(); private final SystemIndices systemIndices = new SystemIndices(List.of()); - private final DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProvider = new DataStreamGlobalRetentionProvider( + private final DataStreamGlobalRetentionSettings dataStreamGlobalRetentionSettings = DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), DataStreamFactoryRetention.emptyFactoryRetention() ); @@ -165,7 +166,7 @@ public void testGetTimeSeriesDataStream() { resolver, systemIndices, ClusterSettings.createBuiltInClusterSettings(), - dataStreamGlobalRetentionProvider + dataStreamGlobalRetentionSettings ); assertThat( response.getDataStreams(), @@ -195,7 +196,7 @@ public void testGetTimeSeriesDataStream() { resolver, systemIndices, ClusterSettings.createBuiltInClusterSettings(), - dataStreamGlobalRetentionProvider + dataStreamGlobalRetentionSettings ); assertThat( response.getDataStreams(), @@ -245,7 +246,7 @@ public void testGetTimeSeriesDataStreamWithOutOfOrderIndices() { 
resolver, systemIndices, ClusterSettings.createBuiltInClusterSettings(), - dataStreamGlobalRetentionProvider + dataStreamGlobalRetentionSettings ); assertThat( response.getDataStreams(), @@ -288,7 +289,7 @@ public void testGetTimeSeriesMixedDataStream() { resolver, systemIndices, ClusterSettings.createBuiltInClusterSettings(), - dataStreamGlobalRetentionProvider + dataStreamGlobalRetentionSettings ); var name1 = DataStream.getDefaultBackingIndexName("ds-1", 1, instant.toEpochMilli()); @@ -333,30 +334,24 @@ public void testPassingGlobalRetention() { resolver, systemIndices, ClusterSettings.createBuiltInClusterSettings(), - dataStreamGlobalRetentionProvider + dataStreamGlobalRetentionSettings ); assertThat(response.getGlobalRetention(), nullValue()); DataStreamGlobalRetention globalRetention = new DataStreamGlobalRetention( TimeValue.timeValueDays(randomIntBetween(1, 5)), TimeValue.timeValueDays(randomIntBetween(5, 10)) ); - DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProviderWithSettings = new DataStreamGlobalRetentionProvider( - new DataStreamFactoryRetention() { - @Override - public TimeValue getMaxRetention() { - return globalRetention.maxRetention(); - } - - @Override - public TimeValue getDefaultRetention() { - return globalRetention.defaultRetention(); - } - - @Override - public void init(ClusterSettings clusterSettings) { - - } - } + DataStreamGlobalRetentionSettings withGlobalRetentionSettings = DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings( + Settings.builder() + .put( + DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey(), + globalRetention.defaultRetention() + ) + .put(DataStreamGlobalRetentionSettings.DATA_STREAMS_MAX_RETENTION_SETTING.getKey(), globalRetention.maxRetention()) + .build() + ), + DataStreamFactoryRetention.emptyFactoryRetention() ); response = GetDataStreamsTransportAction.innerOperation( state, @@ -364,7 +359,7 @@ public void init(ClusterSettings 
clusterSettings) { resolver, systemIndices, ClusterSettings.createBuiltInClusterSettings(), - dataStreamGlobalRetentionProviderWithSettings + withGlobalRetentionSettings ); assertThat(response.getGlobalRetention(), equalTo(globalRetention)); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java index 77b4d5f21529b..8cb27fd9fd282 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java @@ -37,7 +37,7 @@ import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamFactoryRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.DataStreamLifecycle.Downsampling; import org.elasticsearch.cluster.metadata.DataStreamLifecycle.Downsampling.Round; @@ -138,7 +138,8 @@ public class DataStreamLifecycleServiceTests extends ESTestCase { private List clientSeenRequests; private DoExecuteDelegate clientDelegate; private ClusterService clusterService; - private final DataStreamGlobalRetentionProvider globalRetentionResolver = new DataStreamGlobalRetentionProvider( + private final DataStreamGlobalRetentionSettings globalRetentionSettings = DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), DataStreamFactoryRetention.emptyFactoryRetention() ); @@ -187,7 +188,7 @@ public void setupServices() { errorStore, new FeatureService(List.of(new DataStreamFeatures())) ), - 
globalRetentionResolver + globalRetentionSettings ); clientDelegate = null; dataStreamLifecycleService.init(); @@ -1426,7 +1427,7 @@ public void testTrackingTimeStats() { errorStore, new FeatureService(List.of(new DataStreamFeatures())) ), - globalRetentionResolver + globalRetentionSettings ); assertThat(service.getLastRunDuration(), is(nullValue())); assertThat(service.getTimeBetweenStarts(), is(nullValue())); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFactoryRetention.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFactoryRetention.java index 5b96f92193e98..be42916b07956 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFactoryRetention.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFactoryRetention.java @@ -17,7 +17,9 @@ * Holds the factory retention configuration. Factory retention is the global retention configuration meant to be * used if a user hasn't provided other retention configuration via {@link DataStreamGlobalRetention} metadata in the * cluster state. + * @deprecated This interface is deprecated, please use {@link DataStreamGlobalRetentionSettings}. */ +@Deprecated public interface DataStreamFactoryRetention { @Nullable diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java index c74daa22cc137..185f625f6f91f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java @@ -18,14 +18,10 @@ import java.io.IOException; /** - * A cluster state entry that contains global retention settings that are configurable by the user. 
These settings include: - * - default retention, applied on any data stream managed by DSL that does not have an explicit retention defined - * - max retention, applied on every data stream managed by DSL + * Wrapper class for the {@link DataStreamGlobalRetentionSettings}. */ public record DataStreamGlobalRetention(@Nullable TimeValue defaultRetention, @Nullable TimeValue maxRetention) implements Writeable { - public static final String TYPE = "data-stream-global-retention"; - public static final NodeFeature GLOBAL_RETENTION = new NodeFeature("data_stream.lifecycle.global_retention"); public static final TimeValue MIN_RETENTION_VALUE = TimeValue.timeValueSeconds(10); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionProvider.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionProvider.java deleted file mode 100644 index f1e3e18ea4d51..0000000000000 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionProvider.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.cluster.metadata; - -import org.elasticsearch.core.Nullable; - -/** - * Provides the global retention configuration for data stream lifecycle as defined in the settings. - */ -public class DataStreamGlobalRetentionProvider { - - private final DataStreamFactoryRetention factoryRetention; - - public DataStreamGlobalRetentionProvider(DataStreamFactoryRetention factoryRetention) { - this.factoryRetention = factoryRetention; - } - - /** - * Return the global retention configuration as defined in the settings. 
If both settings are null, it returns null. - */ - @Nullable - public DataStreamGlobalRetention provide() { - if (factoryRetention.isDefined() == false) { - return null; - } - return new DataStreamGlobalRetention(factoryRetention.getDefaultRetention(), factoryRetention.getMaxRetention()); - } -} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSettings.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSettings.java new file mode 100644 index 0000000000000..a1fcf56a92726 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSettings.java @@ -0,0 +1,180 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.metadata; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; + +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +/** + * This class holds the data stream global retention settings. It defines, validates and monitors the settings. + *

+ * The global retention settings apply to non-system data streams that are managed by the data stream lifecycle. They consist of: + * - The default retention which applies to data streams that do not have a retention defined. + * - The max retention which applies to all data streams that do not have retention or their retention has exceeded this value. + *

+ * Temporarily, we fall back to {@link DataStreamFactoryRetention} to facilitate a smooth transition to these settings. + */ +public class DataStreamGlobalRetentionSettings { + + private static final Logger logger = LogManager.getLogger(DataStreamGlobalRetentionSettings.class); + public static final TimeValue MIN_RETENTION_VALUE = TimeValue.timeValueSeconds(10); + + public static final Setting DATA_STREAMS_DEFAULT_RETENTION_SETTING = Setting.timeSetting( + "data_streams.lifecycle.retention.default", + TimeValue.MINUS_ONE, + new Setting.Validator<>() { + @Override + public void validate(TimeValue value) {} + + @Override + public void validate(final TimeValue settingValue, final Map, Object> settings) { + TimeValue defaultRetention = getSettingValueOrNull(settingValue); + TimeValue maxRetention = getSettingValueOrNull((TimeValue) settings.get(DATA_STREAMS_MAX_RETENTION_SETTING)); + validateIsolatedRetentionValue(defaultRetention, DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey()); + validateGlobalRetentionConfiguration(defaultRetention, maxRetention); + } + + @Override + public Iterator> settings() { + final List> settings = List.of(DATA_STREAMS_MAX_RETENTION_SETTING); + return settings.iterator(); + } + }, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + public static final Setting DATA_STREAMS_MAX_RETENTION_SETTING = Setting.timeSetting( + "data_streams.lifecycle.retention.max", + TimeValue.MINUS_ONE, + new Setting.Validator<>() { + @Override + public void validate(TimeValue value) {} + + @Override + public void validate(final TimeValue settingValue, final Map, Object> settings) { + TimeValue defaultRetention = getSettingValueOrNull((TimeValue) settings.get(DATA_STREAMS_DEFAULT_RETENTION_SETTING)); + TimeValue maxRetention = getSettingValueOrNull(settingValue); + validateIsolatedRetentionValue(maxRetention, DATA_STREAMS_MAX_RETENTION_SETTING.getKey()); + validateGlobalRetentionConfiguration(defaultRetention, maxRetention); + } + + @Override + public 
Iterator> settings() { + final List> settings = List.of(DATA_STREAMS_DEFAULT_RETENTION_SETTING); + return settings.iterator(); + } + }, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + private final DataStreamFactoryRetention factoryRetention; + + @Nullable + private volatile TimeValue defaultRetention; + @Nullable + private volatile TimeValue maxRetention; + + private DataStreamGlobalRetentionSettings(DataStreamFactoryRetention factoryRetention) { + this.factoryRetention = factoryRetention; + } + + @Nullable + public TimeValue getMaxRetention() { + return shouldFallbackToFactorySettings() ? factoryRetention.getMaxRetention() : maxRetention; + } + + @Nullable + public TimeValue getDefaultRetention() { + return shouldFallbackToFactorySettings() ? factoryRetention.getDefaultRetention() : defaultRetention; + } + + public boolean areDefined() { + return getDefaultRetention() != null || getMaxRetention() != null; + } + + private boolean shouldFallbackToFactorySettings() { + return defaultRetention == null && maxRetention == null; + } + + /** + * Creates an instance and initialises the cluster settings listeners + * @param clusterSettings it will register the cluster settings listeners to monitor for changes + * @param factoryRetention for migration purposes, it will be removed shortly + */ + public static DataStreamGlobalRetentionSettings create(ClusterSettings clusterSettings, DataStreamFactoryRetention factoryRetention) { + DataStreamGlobalRetentionSettings dataStreamGlobalRetentionSettings = new DataStreamGlobalRetentionSettings(factoryRetention); + clusterSettings.initializeAndWatch(DATA_STREAMS_DEFAULT_RETENTION_SETTING, dataStreamGlobalRetentionSettings::setDefaultRetention); + clusterSettings.initializeAndWatch(DATA_STREAMS_MAX_RETENTION_SETTING, dataStreamGlobalRetentionSettings::setMaxRetention); + return dataStreamGlobalRetentionSettings; + } + + private void setMaxRetention(TimeValue maxRetention) { + this.maxRetention = 
getSettingValueOrNull(maxRetention); + logger.info("Updated max factory retention to [{}]", this.maxRetention == null ? null : maxRetention.getStringRep()); + } + + private void setDefaultRetention(TimeValue defaultRetention) { + this.defaultRetention = getSettingValueOrNull(defaultRetention); + logger.info("Updated default factory retention to [{}]", this.defaultRetention == null ? null : defaultRetention.getStringRep()); + } + + private static void validateIsolatedRetentionValue(@Nullable TimeValue retention, String settingName) { + if (retention != null && retention.getMillis() < MIN_RETENTION_VALUE.getMillis()) { + throw new IllegalArgumentException( + "Setting '" + settingName + "' should be greater than " + MIN_RETENTION_VALUE.getStringRep() + ); + } + } + + private static void validateGlobalRetentionConfiguration(@Nullable TimeValue defaultRetention, @Nullable TimeValue maxRetention) { + if (defaultRetention != null && maxRetention != null && defaultRetention.getMillis() > maxRetention.getMillis()) { + throw new IllegalArgumentException( + "Setting [" + + DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey() + + "=" + + defaultRetention.getStringRep() + + "] cannot be greater than [" + + DATA_STREAMS_MAX_RETENTION_SETTING.getKey() + + "=" + + maxRetention.getStringRep() + + "]." + ); + } + } + + @Nullable + public DataStreamGlobalRetention get() { + if (areDefined() == false) { + return null; + } + return new DataStreamGlobalRetention(getDefaultRetention(), getMaxRetention()); + } + + /** + * Time value settings do not accept null as a value. To represent an undefined retention as a setting we use the value + * of -1 and this method converts this to null. + * + * @param value the retention as parsed from the setting + * @return the value when it is not -1 and null otherwise + */ + @Nullable + private static TimeValue getSettingValueOrNull(TimeValue value) { + return value == null || value.equals(TimeValue.MINUS_ONE) ? 
null : value; + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java index bfe7468b97a64..9cac6fa3e8796 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java @@ -41,18 +41,18 @@ public class MetadataDataStreamsService { private final ClusterService clusterService; private final IndicesService indicesService; - private final DataStreamGlobalRetentionProvider globalRetentionResolver; + private final DataStreamGlobalRetentionSettings globalRetentionSettings; private final MasterServiceTaskQueue updateLifecycleTaskQueue; private final MasterServiceTaskQueue setRolloverOnWriteTaskQueue; public MetadataDataStreamsService( ClusterService clusterService, IndicesService indicesService, - DataStreamGlobalRetentionProvider globalRetentionResolver + DataStreamGlobalRetentionSettings globalRetentionSettings ) { this.clusterService = clusterService; this.indicesService = indicesService; - this.globalRetentionResolver = globalRetentionResolver; + this.globalRetentionSettings = globalRetentionSettings; ClusterStateTaskExecutor updateLifecycleExecutor = new SimpleBatchedAckListenerTaskExecutor<>() { @Override @@ -223,7 +223,7 @@ ClusterState updateDataLifecycle(ClusterState currentState, List dataStr if (lifecycle != null) { if (atLeastOneDataStreamIsNotSystem) { // We don't issue any warnings if all data streams are system data streams - lifecycle.addWarningHeaderIfDataRetentionNotEffective(globalRetentionResolver.provide()); + lifecycle.addWarningHeaderIfDataRetentionNotEffective(globalRetentionSettings.get()); } } return ClusterState.builder(currentState).metadata(builder.build()).build(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java 
b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java index c6eb56926eca0..ac56f3f670f43 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java @@ -137,7 +137,7 @@ public class MetadataIndexTemplateService { private final NamedXContentRegistry xContentRegistry; private final SystemIndices systemIndices; private final Set indexSettingProviders; - private final DataStreamGlobalRetentionProvider globalRetentionResolver; + private final DataStreamGlobalRetentionSettings globalRetentionSettings; /** * This is the cluster state task executor for all template-based actions. @@ -183,7 +183,7 @@ public MetadataIndexTemplateService( NamedXContentRegistry xContentRegistry, SystemIndices systemIndices, IndexSettingProviders indexSettingProviders, - DataStreamGlobalRetentionProvider globalRetentionResolver + DataStreamGlobalRetentionSettings globalRetentionSettings ) { this.clusterService = clusterService; this.taskQueue = clusterService.createTaskQueue("index-templates", Priority.URGENT, TEMPLATE_TASK_EXECUTOR); @@ -193,7 +193,7 @@ public MetadataIndexTemplateService( this.xContentRegistry = xContentRegistry; this.systemIndices = systemIndices; this.indexSettingProviders = indexSettingProviders.getIndexSettingProviders(); - this.globalRetentionResolver = globalRetentionResolver; + this.globalRetentionSettings = globalRetentionSettings; } public void removeTemplates( @@ -345,7 +345,7 @@ public ClusterState addComponentTemplate( tempStateWithComponentTemplateAdded.metadata(), composableTemplateName, composableTemplate, - globalRetentionResolver.provide() + globalRetentionSettings.get() ); validateIndexTemplateV2(composableTemplateName, composableTemplate, tempStateWithComponentTemplateAdded); } catch (Exception e) { @@ -369,7 +369,7 @@ public ClusterState addComponentTemplate( } if 
(finalComponentTemplate.template().lifecycle() != null) { - finalComponentTemplate.template().lifecycle().addWarningHeaderIfDataRetentionNotEffective(globalRetentionResolver.provide()); + finalComponentTemplate.template().lifecycle().addWarningHeaderIfDataRetentionNotEffective(globalRetentionSettings.get()); } logger.info("{} component template [{}]", existing == null ? "adding" : "updating", name); @@ -730,7 +730,7 @@ private void validateIndexTemplateV2(String name, ComposableIndexTemplate indexT validate(name, templateToValidate); validateDataStreamsStillReferenced(currentState, name, templateToValidate); - validateLifecycle(currentState.metadata(), name, templateToValidate, globalRetentionResolver.provide()); + validateLifecycle(currentState.metadata(), name, templateToValidate, globalRetentionSettings.get()); if (templateToValidate.isDeprecated() == false) { validateUseOfDeprecatedComponentTemplates(name, templateToValidate, currentState.metadata().componentTemplates()); diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index d5f770ebb95fc..c023b00ec820f 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -36,6 +36,7 @@ import org.elasticsearch.cluster.coordination.MasterHistory; import org.elasticsearch.cluster.coordination.NoMasterBlockService; import org.elasticsearch.cluster.coordination.Reconfigurator; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.Metadata; @@ -598,6 +599,8 @@ public void apply(Settings value, Settings current, Settings previous) { TDigestExecutionHint.SETTING, 
MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT_SETTING, MergePolicyConfig.DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT_SETTING, - TransportService.ENABLE_STACK_OVERFLOW_AVOIDANCE + TransportService.ENABLE_STACK_OVERFLOW_AVOIDANCE, + DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING, + DataStreamGlobalRetentionSettings.DATA_STREAMS_MAX_RETENTION_SETTING ).filter(Objects::nonNull).collect(Collectors.toSet()); } diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 27a82cf6a2501..a4db9a0a0e149 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -42,7 +42,7 @@ import org.elasticsearch.cluster.coordination.StableMasterHealthIndicatorService; import org.elasticsearch.cluster.features.NodeFeaturesFixupListener; import org.elasticsearch.cluster.metadata.DataStreamFactoryRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.IndexMetadataVerifier; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetadataCreateDataStreamService; @@ -588,25 +588,27 @@ private ScriptService createScriptService(SettingsModule settingsModule, ThreadP return scriptService; } - private DataStreamGlobalRetentionProvider createDataStreamServicesAndGlobalRetentionResolver( + private DataStreamGlobalRetentionSettings createDataStreamServicesAndGlobalRetentionResolver( + Settings settings, ThreadPool threadPool, ClusterService clusterService, IndicesService indicesService, MetadataCreateIndexService metadataCreateIndexService ) { - DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProvider = new DataStreamGlobalRetentionProvider( + DataStreamGlobalRetentionSettings 
dataStreamGlobalRetentionSettings = DataStreamGlobalRetentionSettings.create( + clusterService.getClusterSettings(), DataStreamFactoryRetention.load(pluginsService, clusterService.getClusterSettings()) ); - modules.bindToInstance(DataStreamGlobalRetentionProvider.class, dataStreamGlobalRetentionProvider); + modules.bindToInstance(DataStreamGlobalRetentionSettings.class, dataStreamGlobalRetentionSettings); modules.bindToInstance( MetadataCreateDataStreamService.class, new MetadataCreateDataStreamService(threadPool, clusterService, metadataCreateIndexService) ); modules.bindToInstance( MetadataDataStreamsService.class, - new MetadataDataStreamsService(clusterService, indicesService, dataStreamGlobalRetentionProvider) + new MetadataDataStreamsService(clusterService, indicesService, dataStreamGlobalRetentionSettings) ); - return dataStreamGlobalRetentionProvider; + return dataStreamGlobalRetentionSettings; } private UpdateHelper createUpdateHelper(DocumentParsingProvider documentParsingProvider, ScriptService scriptService) { @@ -815,7 +817,8 @@ private void construct( threadPool ); - final DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProvider = createDataStreamServicesAndGlobalRetentionResolver( + final DataStreamGlobalRetentionSettings dataStreamGlobalRetentionSettings = createDataStreamServicesAndGlobalRetentionResolver( + settings, threadPool, clusterService, indicesService, @@ -840,7 +843,7 @@ record PluginServiceInstances( IndicesService indicesService, FeatureService featureService, SystemIndices systemIndices, - DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProvider, + DataStreamGlobalRetentionSettings dataStreamGlobalRetentionSettings, DocumentParsingProvider documentParsingProvider ) implements Plugin.PluginServices {} PluginServiceInstances pluginServices = new PluginServiceInstances( @@ -861,7 +864,7 @@ record PluginServiceInstances( indicesService, featureService, systemIndices, - dataStreamGlobalRetentionProvider, + 
dataStreamGlobalRetentionSettings, documentParsingProvider ); @@ -895,7 +898,7 @@ record PluginServiceInstances( systemIndices, indexSettingProviders, metadataCreateIndexService, - dataStreamGlobalRetentionProvider + dataStreamGlobalRetentionSettings ), pluginsService.loadSingletonServiceProvider(RestExtension.class, RestExtension::allowAll) ); @@ -1465,7 +1468,7 @@ private List> buildReservedStateHandlers( SystemIndices systemIndices, IndexSettingProviders indexSettingProviders, MetadataCreateIndexService metadataCreateIndexService, - DataStreamGlobalRetentionProvider globalRetentionResolver + DataStreamGlobalRetentionSettings globalRetentionSettings ) { List> reservedStateHandlers = new ArrayList<>(); @@ -1480,7 +1483,7 @@ private List> buildReservedStateHandlers( xContentRegistry, systemIndices, indexSettingProviders, - globalRetentionResolver + globalRetentionSettings ); reservedStateHandlers.add(new ReservedComposableIndexTemplateAction(templateService, settingsModule.getIndexScopedSettings())); diff --git a/server/src/main/java/org/elasticsearch/plugins/Plugin.java b/server/src/main/java/org/elasticsearch/plugins/Plugin.java index 1815f4403019f..a8bfda54b0646 100644 --- a/server/src/main/java/org/elasticsearch/plugins/Plugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/Plugin.java @@ -10,7 +10,7 @@ import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.client.internal.Client; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; import org.elasticsearch.cluster.routing.RerouteService; @@ -156,10 +156,10 @@ public interface PluginServices { SystemIndices systemIndices(); /** - * A service that resolves the data stream global retention that applies to + * A service that holds the data stream 
global retention settings that applies to * data streams managed by the data stream lifecycle. */ - DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProvider(); + DataStreamGlobalRetentionSettings dataStreamGlobalRetentionSettings(); /** * A provider of utilities to observe and report parsing of documents diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateActionTests.java index b2a29e2bcfeb7..32a74fef61209 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateActionTests.java @@ -18,7 +18,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStreamFactoryRetention; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionProvider; +import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.metadata.ReservedStateHandlerMetadata; import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; @@ -75,7 +76,7 @@ public class ReservedComposableIndexTemplateActionTests extends ESTestCase { ClusterService clusterService; 
IndexScopedSettings indexScopedSettings; IndicesService indicesService; - private DataStreamGlobalRetentionProvider globalRetentionResolver; + private DataStreamGlobalRetentionSettings globalRetentionSettings; @Before public void setup() throws IOException { @@ -92,7 +93,10 @@ public void setup() throws IOException { doReturn(mapperService).when(indexService).mapperService(); doReturn(indexService).when(indicesService).createIndex(any(), any(), anyBoolean()); - globalRetentionResolver = new DataStreamGlobalRetentionProvider(DataStreamFactoryRetention.emptyFactoryRetention()); + globalRetentionSettings = DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), + DataStreamFactoryRetention.emptyFactoryRetention() + ); templateService = new MetadataIndexTemplateService( clusterService, mock(MetadataCreateIndexService.class), @@ -101,7 +105,7 @@ public void setup() throws IOException { mock(NamedXContentRegistry.class), mock(SystemIndices.class), new IndexSettingProviders(Set.of()), - globalRetentionResolver + globalRetentionSettings ); } @@ -896,7 +900,7 @@ public void testTemplatesWithReservedPrefix() throws Exception { mock(NamedXContentRegistry.class), mock(SystemIndices.class), new IndexSettingProviders(Set.of()), - globalRetentionResolver + globalRetentionSettings ); ClusterState state = ClusterState.builder(new ClusterName("elasticsearch")).metadata(metadata).build(); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionProviderTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionProviderTests.java deleted file mode 100644 index f22664ea5b7d0..0000000000000 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionProviderTests.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.cluster.metadata; - -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.test.ESTestCase; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; - -public class DataStreamGlobalRetentionProviderTests extends ESTestCase { - - public void testOnlyFactoryRetentionFallback() { - DataStreamFactoryRetention factoryRetention = randomNonEmptyFactoryRetention(); - DataStreamGlobalRetentionProvider resolver = new DataStreamGlobalRetentionProvider(factoryRetention); - DataStreamGlobalRetention globalRetention = resolver.provide(); - assertThat(globalRetention, notNullValue()); - assertThat(globalRetention.defaultRetention(), equalTo(factoryRetention.getDefaultRetention())); - assertThat(globalRetention.maxRetention(), equalTo(factoryRetention.getMaxRetention())); - } - - private static DataStreamFactoryRetention randomNonEmptyFactoryRetention() { - boolean withDefault = randomBoolean(); - TimeValue defaultRetention = withDefault ? TimeValue.timeValueDays(randomIntBetween(10, 20)) : null; - TimeValue maxRetention = withDefault && randomBoolean() ? 
null : TimeValue.timeValueDays(randomIntBetween(50, 200)); - return new DataStreamFactoryRetention() { - @Override - public TimeValue getMaxRetention() { - return maxRetention; - } - - @Override - public TimeValue getDefaultRetention() { - return defaultRetention; - } - - @Override - public void init(ClusterSettings clusterSettings) { - - } - }; - } - - public void testNoRetentionConfiguration() { - DataStreamGlobalRetentionProvider resolver = new DataStreamGlobalRetentionProvider( - DataStreamFactoryRetention.emptyFactoryRetention() - ); - assertThat(resolver.provide(), nullValue()); - } -} diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSettingsTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSettingsTests.java new file mode 100644 index 0000000000000..78184fd7568e5 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetentionSettingsTests.java @@ -0,0 +1,141 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +public class DataStreamGlobalRetentionSettingsTests extends ESTestCase { + + public void testDefaults() { + DataStreamGlobalRetentionSettings globalRetentionSettings = DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), + DataStreamFactoryRetention.emptyFactoryRetention() + ); + + assertThat(globalRetentionSettings.getDefaultRetention(), nullValue()); + assertThat(globalRetentionSettings.getMaxRetention(), nullValue()); + + // Fallback to factory settings + TimeValue maxFactoryValue = randomPositiveTimeValue(); + TimeValue defaultFactoryValue = randomPositiveTimeValue(); + DataStreamGlobalRetentionSettings withFactorySettings = DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), + new DataStreamFactoryRetention() { + @Override + public TimeValue getMaxRetention() { + return maxFactoryValue; + } + + @Override + public TimeValue getDefaultRetention() { + return defaultFactoryValue; + } + + @Override + public void init(ClusterSettings clusterSettings) { + + } + } + ); + + assertThat(withFactorySettings.getDefaultRetention(), equalTo(defaultFactoryValue)); + assertThat(withFactorySettings.getMaxRetention(), equalTo(maxFactoryValue)); + } + + public void testMonitorsDefaultRetention() { + ClusterSettings clusterSettings = ClusterSettings.createBuiltInClusterSettings(); + DataStreamGlobalRetentionSettings globalRetentionSettings = DataStreamGlobalRetentionSettings.create( + clusterSettings, + DataStreamFactoryRetention.emptyFactoryRetention() + ); + + // Test valid update + TimeValue 
newDefaultRetention = TimeValue.timeValueDays(randomIntBetween(1, 10)); + Settings newSettings = Settings.builder() + .put( + DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey(), + newDefaultRetention.toHumanReadableString(0) + ) + .build(); + clusterSettings.applySettings(newSettings); + + assertThat(newDefaultRetention, equalTo(globalRetentionSettings.getDefaultRetention())); + + // Test invalid update + Settings newInvalidSettings = Settings.builder() + .put(DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey(), TimeValue.ZERO) + .build(); + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> clusterSettings.applySettings(newInvalidSettings) + ); + assertThat( + exception.getCause().getMessage(), + containsString("Setting 'data_streams.lifecycle.retention.default' should be greater than") + ); + } + + public void testMonitorsMaxRetention() { + ClusterSettings clusterSettings = ClusterSettings.createBuiltInClusterSettings(); + DataStreamGlobalRetentionSettings globalRetentionSettings = DataStreamGlobalRetentionSettings.create( + clusterSettings, + DataStreamFactoryRetention.emptyFactoryRetention() + ); + + // Test valid update + TimeValue newMaxRetention = TimeValue.timeValueDays(randomIntBetween(10, 30)); + Settings newSettings = Settings.builder() + .put(DataStreamGlobalRetentionSettings.DATA_STREAMS_MAX_RETENTION_SETTING.getKey(), newMaxRetention.toHumanReadableString(0)) + .build(); + clusterSettings.applySettings(newSettings); + + assertThat(newMaxRetention, equalTo(globalRetentionSettings.getMaxRetention())); + + // Test invalid update + Settings newInvalidSettings = Settings.builder() + .put(DataStreamGlobalRetentionSettings.DATA_STREAMS_MAX_RETENTION_SETTING.getKey(), TimeValue.ZERO) + .build(); + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> clusterSettings.applySettings(newInvalidSettings) + ); + assertThat( + 
exception.getCause().getMessage(), + containsString("Setting 'data_streams.lifecycle.retention.max' should be greater than") + ); + } + + public void testCombinationValidation() { + ClusterSettings clusterSettings = ClusterSettings.createBuiltInClusterSettings(); + DataStreamGlobalRetentionSettings.create(clusterSettings, DataStreamFactoryRetention.emptyFactoryRetention()); + + // Test invalid update + Settings newInvalidSettings = Settings.builder() + .put(DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey(), TimeValue.timeValueDays(90)) + .put(DataStreamGlobalRetentionSettings.DATA_STREAMS_MAX_RETENTION_SETTING.getKey(), TimeValue.timeValueDays(30)) + .build(); + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> clusterSettings.applySettings(newInvalidSettings) + ); + assertThat( + exception.getCause().getMessage(), + containsString( + "Setting [data_streams.lifecycle.retention.default=90d] cannot be greater than [data_streams.lifecycle.retention.max=30d]" + ) + ); + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleWithRetentionWarningsTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleWithRetentionWarningsTests.java index acfe2b4f847c4..f6417da4fa2da 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleWithRetentionWarningsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleWithRetentionWarningsTests.java @@ -128,16 +128,22 @@ public void testUpdatingLifecycleOnADataStream() { HeaderWarning.setThreadContext(threadContext); String dataStream = randomAlphaOfLength(5); TimeValue defaultRetention = randomTimeValue(2, 100, TimeUnit.DAYS); - - DataStreamFactoryRetention factoryRetention = getDefaultFactoryRetention(defaultRetention); ClusterState before = ClusterState.builder( DataStreamTestHelper.getClusterStateWithDataStreams(List.of(new 
Tuple<>(dataStream, 2)), List.of()) ).build(); + Settings settingsWithDefaultRetention = builder().put( + DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey(), + defaultRetention + ).build(); + MetadataDataStreamsService metadataDataStreamsService = new MetadataDataStreamsService( mock(ClusterService.class), mock(IndicesService.class), - new DataStreamGlobalRetentionProvider(factoryRetention) + DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(settingsWithDefaultRetention), + DataStreamFactoryRetention.emptyFactoryRetention() + ) ); ClusterState after = metadataDataStreamsService.updateDataLifecycle(before, List.of(dataStream), DataStreamLifecycle.DEFAULT); @@ -245,7 +251,9 @@ public void testValidateLifecycleInComponentTemplate() throws Exception { new IndexSettingProviders(Set.of()) ); TimeValue defaultRetention = randomTimeValue(2, 100, TimeUnit.DAYS); - DataStreamFactoryRetention factoryRetention = getDefaultFactoryRetention(defaultRetention); + Settings settingsWithDefaultRetention = Settings.builder() + .put(DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING.getKey(), defaultRetention) + .build(); ClusterState state = ClusterState.EMPTY_STATE; MetadataIndexTemplateService metadataIndexTemplateService = new MetadataIndexTemplateService( clusterService, @@ -255,7 +263,10 @@ public void testValidateLifecycleInComponentTemplate() throws Exception { xContentRegistry(), EmptySystemIndices.INSTANCE, new IndexSettingProviders(Set.of()), - new DataStreamGlobalRetentionProvider(factoryRetention) + DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(settingsWithDefaultRetention), + DataStreamFactoryRetention.emptyFactoryRetention() + ) ); ThreadContext threadContext = new ThreadContext(Settings.EMPTY); @@ -283,23 +294,4 @@ public void testValidateLifecycleInComponentTemplate() throws Exception { ) ); } - - private DataStreamFactoryRetention 
getDefaultFactoryRetention(TimeValue defaultRetention) { - return new DataStreamFactoryRetention() { - @Override - public TimeValue getMaxRetention() { - return null; - } - - @Override - public TimeValue getDefaultRetention() { - return defaultRetention; - } - - @Override - public void init(ClusterSettings clusterSettings) { - - } - }; - } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java index 7ce418301a352..e0f4936300c0e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; @@ -400,7 +401,10 @@ public void testUpdateLifecycle() { MetadataDataStreamsService service = new MetadataDataStreamsService( mock(ClusterService.class), mock(IndicesService.class), - new DataStreamGlobalRetentionProvider(DataStreamFactoryRetention.emptyFactoryRetention()) + DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), + DataStreamFactoryRetention.emptyFactoryRetention() + ) ); { // Remove lifecycle diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index f5daac8ecd090..e66dd32b718b7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ 
b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.PutRequest; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; @@ -2501,7 +2502,10 @@ private static List putTemplate(NamedXContentRegistry xContentRegistr xContentRegistry, EmptySystemIndices.INSTANCE, new IndexSettingProviders(Set.of()), - new DataStreamGlobalRetentionProvider(DataStreamFactoryRetention.emptyFactoryRetention()) + DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), + DataStreamFactoryRetention.emptyFactoryRetention() + ) ); final List throwables = new ArrayList<>(); @@ -2543,9 +2547,6 @@ public void onFailure(Exception e) { private MetadataIndexTemplateService getMetadataIndexTemplateService() { IndicesService indicesService = getInstanceFromNode(IndicesService.class); ClusterService clusterService = getInstanceFromNode(ClusterService.class); - DataStreamGlobalRetentionProvider dataStreamGlobalRetentionProvider = new DataStreamGlobalRetentionProvider( - DataStreamFactoryRetention.emptyFactoryRetention() - ); MetadataCreateIndexService createIndexService = new MetadataCreateIndexService( Settings.EMPTY, clusterService, @@ -2568,7 +2569,10 @@ private MetadataIndexTemplateService getMetadataIndexTemplateService() { xContentRegistry(), EmptySystemIndices.INSTANCE, new IndexSettingProviders(Set.of()), - dataStreamGlobalRetentionProvider + DataStreamGlobalRetentionSettings.create( + ClusterSettings.createBuiltInClusterSettings(), + DataStreamFactoryRetention.emptyFactoryRetention() + ) ); } From e3bf795659ab2409b8bcd0804669e6602a1a30db Mon Sep 17 00:00:00 
2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 20 Aug 2024 17:11:25 +1000 Subject: [PATCH 18/20] Mute org.elasticsearch.xpack.esql.qa.mixed.FieldExtractorIT testScaledFloat #112003 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index dd4dd2c7f2ec7..95fb4a32b4227 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -184,6 +184,9 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/111923 - class: org.elasticsearch.xpack.test.rest.XPackRestIT issue: https://github.com/elastic/elasticsearch/issues/111944 +- class: org.elasticsearch.xpack.esql.qa.mixed.FieldExtractorIT + method: testScaledFloat + issue: https://github.com/elastic/elasticsearch/issues/112003 # Examples: # From 3390a82ef65d3c58f9e17e7eb5ae584f2691889e Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 20 Aug 2024 08:54:58 +0100 Subject: [PATCH 19/20] Remove `SnapshotDeleteListener` (#111988) This special listener is awkward to handle since it does not fit into the usual `ActionListener` framework. Moreover there's no need for it, we can have a regular listener and then a separate `Runnable` for tracking the completion of the cleanup actions. 
--- .../repositories/s3/S3Repository.java | 81 ++++---------- .../repositories/FilterRepository.java | 6 +- .../repositories/InvalidRepository.java | 6 +- .../repositories/Repository.java | 11 +- .../repositories/UnknownTypeRepository.java | 6 +- .../blobstore/BlobStoreRepository.java | 105 ++++++++---------- .../snapshots/SnapshotDeleteListener.java | 35 ------ .../snapshots/SnapshotsService.java | 18 ++- .../RepositoriesServiceTests.java | 6 +- .../index/shard/RestoreOnlyRepository.java | 6 +- .../xpack/ccr/repository/CcrRepository.java | 6 +- 11 files changed, 97 insertions(+), 189 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/snapshots/SnapshotDeleteListener.java diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index a6edb0dec4122..d75a3e8ad433e 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -37,7 +37,6 @@ import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.MeteredBlobStoreRepository; -import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.threadpool.Scheduler; @@ -320,7 +319,7 @@ public void finalizeSnapshot(final FinalizeSnapshotContext finalizeSnapshotConte finalizeSnapshotContext.clusterMetadata(), finalizeSnapshotContext.snapshotInfo(), finalizeSnapshotContext.repositoryMetaVersion(), - delayedListener(ActionListener.runAfter(finalizeSnapshotContext, () -> metadataDone.onResponse(null))), + wrapWithWeakConsistencyProtection(ActionListener.runAfter(finalizeSnapshotContext, () -> metadataDone.onResponse(null))), 
info -> metadataDone.addListener(new ActionListener<>() { @Override public void onResponse(Void unused) { @@ -339,50 +338,19 @@ public void onFailure(Exception e) { super.finalizeSnapshot(wrappedFinalizeContext); } - @Override - protected SnapshotDeleteListener wrapWithWeakConsistencyProtection(SnapshotDeleteListener listener) { - return new SnapshotDeleteListener() { - @Override - public void onDone() { - listener.onDone(); - } - - @Override - public void onRepositoryDataWritten(RepositoryData repositoryData) { - logCooldownInfo(); - final Scheduler.Cancellable existing = finalizationFuture.getAndSet(threadPool.schedule(() -> { - final Scheduler.Cancellable cancellable = finalizationFuture.getAndSet(null); - assert cancellable != null; - listener.onRepositoryDataWritten(repositoryData); - }, coolDown, snapshotExecutor)); - assert existing == null : "Already have an ongoing finalization " + finalizationFuture; - } - - @Override - public void onFailure(Exception e) { - logCooldownInfo(); - final Scheduler.Cancellable existing = finalizationFuture.getAndSet(threadPool.schedule(() -> { - final Scheduler.Cancellable cancellable = finalizationFuture.getAndSet(null); - assert cancellable != null; - listener.onFailure(e); - }, coolDown, snapshotExecutor)); - assert existing == null : "Already have an ongoing finalization " + finalizationFuture; - } - }; - } - /** * Wraps given listener such that it is executed with a delay of {@link #coolDown} on the snapshot thread-pool after being invoked. * See {@link #COOLDOWN_PERIOD} for details. 
*/ - private ActionListener delayedListener(ActionListener listener) { - final ActionListener wrappedListener = ActionListener.runBefore(listener, () -> { + @Override + protected ActionListener wrapWithWeakConsistencyProtection(ActionListener listener) { + final ActionListener wrappedListener = ActionListener.runBefore(listener, () -> { final Scheduler.Cancellable cancellable = finalizationFuture.getAndSet(null); assert cancellable != null; }); return new ActionListener<>() { @Override - public void onResponse(T response) { + public void onResponse(RepositoryData response) { logCooldownInfo(); final Scheduler.Cancellable existing = finalizationFuture.getAndSet( threadPool.schedule(ActionRunnable.wrap(wrappedListener, l -> l.onResponse(response)), coolDown, snapshotExecutor) @@ -483,43 +451,34 @@ public void deleteSnapshots( Collection snapshotIds, long repositoryDataGeneration, IndexVersion minimumNodeVersion, - SnapshotDeleteListener snapshotDeleteListener + ActionListener repositoryDataUpdateListener, + Runnable onCompletion ) { getMultipartUploadCleanupListener( isReadOnly() ? 
0 : MAX_MULTIPART_UPLOAD_CLEANUP_SIZE.get(getMetadata().settings()), new ActionListener<>() { @Override public void onResponse(ActionListener multipartUploadCleanupListener) { - S3Repository.super.deleteSnapshots( - snapshotIds, - repositoryDataGeneration, - minimumNodeVersion, - new SnapshotDeleteListener() { - @Override - public void onDone() { - snapshotDeleteListener.onDone(); - } - - @Override - public void onRepositoryDataWritten(RepositoryData repositoryData) { - multipartUploadCleanupListener.onResponse(null); - snapshotDeleteListener.onRepositoryDataWritten(repositoryData); - } - - @Override - public void onFailure(Exception e) { - multipartUploadCleanupListener.onFailure(e); - snapshotDeleteListener.onFailure(e); - } + S3Repository.super.deleteSnapshots(snapshotIds, repositoryDataGeneration, minimumNodeVersion, new ActionListener<>() { + @Override + public void onResponse(RepositoryData repositoryData) { + multipartUploadCleanupListener.onResponse(null); + repositoryDataUpdateListener.onResponse(repositoryData); + } + + @Override + public void onFailure(Exception e) { + multipartUploadCleanupListener.onFailure(e); + repositoryDataUpdateListener.onFailure(e); } - ); + }, onCompletion); } @Override public void onFailure(Exception e) { logger.warn("failed to get multipart uploads for cleanup during snapshot delete", e); assert false : e; // getMultipartUploadCleanupListener doesn't throw and snapshotExecutor doesn't reject anything - snapshotDeleteListener.onFailure(e); + repositoryDataUpdateListener.onFailure(e); } } ); diff --git a/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java b/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java index 37f1850c1fb2d..67d59924652db 100644 --- a/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java @@ -22,7 +22,6 @@ import 
org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.recovery.RecoveryState; -import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; @@ -85,9 +84,10 @@ public void deleteSnapshots( Collection snapshotIds, long repositoryDataGeneration, IndexVersion minimumNodeVersion, - SnapshotDeleteListener listener + ActionListener repositoryDataUpdateListener, + Runnable onCompletion ) { - in.deleteSnapshots(snapshotIds, repositoryDataGeneration, minimumNodeVersion, listener); + in.deleteSnapshots(snapshotIds, repositoryDataGeneration, minimumNodeVersion, repositoryDataUpdateListener, onCompletion); } @Override diff --git a/server/src/main/java/org/elasticsearch/repositories/InvalidRepository.java b/server/src/main/java/org/elasticsearch/repositories/InvalidRepository.java index 948ae747e11a9..2aba6fbbebce2 100644 --- a/server/src/main/java/org/elasticsearch/repositories/InvalidRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/InvalidRepository.java @@ -21,7 +21,6 @@ import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.recovery.RecoveryState; -import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; @@ -92,9 +91,10 @@ public void deleteSnapshots( Collection snapshotIds, long repositoryDataGeneration, IndexVersion minimumNodeVersion, - SnapshotDeleteListener listener + ActionListener repositoryDataUpdateListener, + Runnable onCompletion ) { - listener.onFailure(createCreationException()); + repositoryDataUpdateListener.onFailure(createCreationException()); } @Override diff --git a/server/src/main/java/org/elasticsearch/repositories/Repository.java 
b/server/src/main/java/org/elasticsearch/repositories/Repository.java index 06a53053bca88..fd52c21cad3f8 100644 --- a/server/src/main/java/org/elasticsearch/repositories/Repository.java +++ b/server/src/main/java/org/elasticsearch/repositories/Repository.java @@ -22,7 +22,6 @@ import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.recovery.RecoveryState; -import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.threadpool.ThreadPool; @@ -161,13 +160,19 @@ public void onFailure(Exception e) { * @param repositoryDataGeneration the generation of the {@link RepositoryData} in the repository at the start of the deletion * @param minimumNodeVersion the minimum {@link IndexVersion} across the nodes in the cluster, with which the repository * format must remain compatible - * @param listener completion listener, see {@link SnapshotDeleteListener}. + * @param repositoryDataUpdateListener listener completed when the {@link RepositoryData} is updated, or when the process fails + * without changing the repository contents - in either case, it is now safe for the next operation + * on this repository to proceed. + * @param onCompletion action executed on completion of the cleanup actions that follow a successful + * {@link RepositoryData} update; not called if {@code repositoryDataUpdateListener} completes + * exceptionally. 
*/ void deleteSnapshots( Collection snapshotIds, long repositoryDataGeneration, IndexVersion minimumNodeVersion, - SnapshotDeleteListener listener + ActionListener repositoryDataUpdateListener, + Runnable onCompletion ); /** diff --git a/server/src/main/java/org/elasticsearch/repositories/UnknownTypeRepository.java b/server/src/main/java/org/elasticsearch/repositories/UnknownTypeRepository.java index 7821c865e166c..853de48a483a1 100644 --- a/server/src/main/java/org/elasticsearch/repositories/UnknownTypeRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/UnknownTypeRepository.java @@ -21,7 +21,6 @@ import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.recovery.RecoveryState; -import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; @@ -90,9 +89,10 @@ public void deleteSnapshots( Collection snapshotIds, long repositoryDataGeneration, IndexVersion minimumNodeVersion, - SnapshotDeleteListener listener + ActionListener repositoryDataUpdateListener, + Runnable onCompletion ) { - listener.onFailure(createUnknownTypeException()); + repositoryDataUpdateListener.onFailure(createUnknownTypeException()); } @Override diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index ddef1e1b808fe..e8af752bec179 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -123,7 +123,6 @@ import org.elasticsearch.repositories.SnapshotShardContext; import org.elasticsearch.snapshots.AbortedSnapshotException; import org.elasticsearch.snapshots.PausedSnapshotException; -import 
org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotException; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; @@ -847,8 +846,8 @@ private RepositoryData safeRepositoryData(long repositoryDataGeneration, Map wrapWithWeakConsistencyProtection(ActionListener listener) { + return listener; } @Override @@ -856,19 +855,15 @@ public void deleteSnapshots( Collection snapshotIds, long repositoryDataGeneration, IndexVersion minimumNodeVersion, - SnapshotDeleteListener listener + ActionListener repositoryDataUpdateListener, + Runnable onCompletion ) { - createSnapshotsDeletion(snapshotIds, repositoryDataGeneration, minimumNodeVersion, new ActionListener<>() { - @Override - public void onResponse(SnapshotsDeletion snapshotsDeletion) { - snapshotsDeletion.runDelete(listener); - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); + createSnapshotsDeletion( + snapshotIds, + repositoryDataGeneration, + minimumNodeVersion, + repositoryDataUpdateListener.delegateFailureAndWrap((l, snapshotsDeletion) -> snapshotsDeletion.runDelete(l, onCompletion)) + ); } /** @@ -933,7 +928,7 @@ private void createSnapshotsDeletion( * *

* Until the {@link RepositoryData} is updated there should be no other activities in the repository, and in particular the root - * blob must not change until it is updated by this deletion and {@link SnapshotDeleteListener#onRepositoryDataWritten} is called. + * blob must not change until it is updated by this deletion and the {@code repositoryDataUpdateListener} is completed. *

*/ class SnapshotsDeletion { @@ -1027,40 +1022,29 @@ class SnapshotsDeletion { // --------------------------------------------------------------------------------------------------------------------------------- // The overall flow of execution - void runDelete(SnapshotDeleteListener listener) { - final var releasingListener = new SnapshotDeleteListener() { - @Override - public void onDone() { - try { - shardBlobsToDelete.close(); - } finally { - listener.onDone(); - } - } - - @Override - public void onRepositoryDataWritten(RepositoryData repositoryData) { - listener.onRepositoryDataWritten(repositoryData); + void runDelete(ActionListener repositoryDataUpdateListener, Runnable onCompletion) { + final var releasingListener = repositoryDataUpdateListener.delegateResponse((l, e) -> { + try { + shardBlobsToDelete.close(); + } finally { + l.onFailure(e); } - - @Override - public void onFailure(Exception e) { - try { - shardBlobsToDelete.close(); - } finally { - listener.onFailure(e); - } - + }); + final Runnable releasingOnCompletion = () -> { + try { + shardBlobsToDelete.close(); + } finally { + onCompletion.run(); } }; if (useShardGenerations) { - runWithUniqueShardMetadataNaming(releasingListener); + runWithUniqueShardMetadataNaming(releasingListener, releasingOnCompletion); } else { - runWithLegacyNumericShardMetadataNaming(wrapWithWeakConsistencyProtection(releasingListener)); + runWithLegacyNumericShardMetadataNaming(wrapWithWeakConsistencyProtection(releasingListener), releasingOnCompletion); } } - private void runWithUniqueShardMetadataNaming(SnapshotDeleteListener listener) { + private void runWithUniqueShardMetadataNaming(ActionListener repositoryDataUpdateListener, Runnable onCompletion) { SubscribableListener // First write the new shard state metadata (without the removed snapshots) and compute deletion targets @@ -1082,30 +1066,29 @@ private void runWithUniqueShardMetadataNaming(SnapshotDeleteListener listener) { ); }) - .addListener( - 
ActionListener.wrap( - // Once we have updated the repository, run the clean-ups - newRepositoryData -> { - listener.onRepositoryDataWritten(newRepositoryData); - // Run unreferenced blobs cleanup in parallel to shard-level snapshot deletion - try (var refs = new RefCountingRunnable(listener::onDone)) { - cleanupUnlinkedRootAndIndicesBlobs(newRepositoryData, refs.acquireListener()); - cleanupUnlinkedShardLevelBlobs(refs.acquireListener()); - } - }, - listener::onFailure - ) - ); + .andThen((l, newRepositoryData) -> { + l.onResponse(newRepositoryData); + // Once we have updated the repository, run the unreferenced blobs cleanup in parallel to shard-level snapshot deletion + try (var refs = new RefCountingRunnable(onCompletion)) { + cleanupUnlinkedRootAndIndicesBlobs(newRepositoryData, refs.acquireListener()); + cleanupUnlinkedShardLevelBlobs(refs.acquireListener()); + } + }) + + .addListener(repositoryDataUpdateListener); } - private void runWithLegacyNumericShardMetadataNaming(SnapshotDeleteListener listener) { + private void runWithLegacyNumericShardMetadataNaming( + ActionListener repositoryDataUpdateListener, + Runnable onCompletion + ) { // Write the new repository data first (with the removed snapshot), using no shard generations updateRepositoryData( originalRepositoryData.removeSnapshots(snapshotIds, ShardGenerations.EMPTY), - ActionListener.wrap(newRepositoryData -> { + repositoryDataUpdateListener.delegateFailure((delegate, newRepositoryData) -> { try (var refs = new RefCountingRunnable(() -> { - listener.onRepositoryDataWritten(newRepositoryData); - listener.onDone(); + delegate.onResponse(newRepositoryData); + onCompletion.run(); })) { // Run unreferenced blobs cleanup in parallel to shard-level snapshot deletion cleanupUnlinkedRootAndIndicesBlobs(newRepositoryData, refs.acquireListener()); @@ -1120,7 +1103,7 @@ private void runWithLegacyNumericShardMetadataNaming(SnapshotDeleteListener list ) ); } - }, listener::onFailure) + }) ); } diff --git 
a/server/src/main/java/org/elasticsearch/snapshots/SnapshotDeleteListener.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotDeleteListener.java deleted file mode 100644 index 324ad736d7248..0000000000000 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotDeleteListener.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.snapshots; - -import org.elasticsearch.repositories.RepositoryData; - -public interface SnapshotDeleteListener { - - /** - * Invoked once the snapshots have been fully deleted from the repository, including all async cleanup operations, indicating that - * listeners waiting for the end of the deletion can now be notified. - */ - void onDone(); - - /** - * Invoked once the updated {@link RepositoryData} has been written to the repository and it is safe for the next repository operation - * to proceed. - * - * @param repositoryData updated repository data - */ - void onRepositoryDataWritten(RepositoryData repositoryData); - - /** - * Invoked if writing updated {@link RepositoryData} to the repository failed. Once {@link #onRepositoryDataWritten(RepositoryData)} has - * been invoked this method will never be invoked. 
- * - * @param e exception during metadata steps of snapshot delete - */ - void onFailure(Exception e); -} diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 6d7404d7472e5..ed88b7272245f 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.action.support.RefCountingRunnable; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; @@ -2491,19 +2492,11 @@ private void deleteSnapshotsFromRepository( ); return; } + final SubscribableListener doneFuture = new SubscribableListener<>(); repositoriesService.repository(deleteEntry.repository()) - .deleteSnapshots(snapshotIds, repositoryData.getGenId(), minNodeVersion, new SnapshotDeleteListener() { - - private final ListenableFuture doneFuture = new ListenableFuture<>(); - - @Override - public void onDone() { - logger.info("snapshots {} deleted", snapshotIds); - doneFuture.onResponse(null); - } - + .deleteSnapshots(snapshotIds, repositoryData.getGenId(), minNodeVersion, new ActionListener<>() { @Override - public void onRepositoryDataWritten(RepositoryData updatedRepoData) { + public void onResponse(RepositoryData updatedRepoData) { removeSnapshotDeletionFromClusterState( deleteEntry, updatedRepoData, @@ -2549,6 +2542,9 @@ protected void handleListeners(List> deleteListeners) { } ); } + }, () -> { + logger.info("snapshots {} deleted", snapshotIds); + doneFuture.onResponse(null); }); } } diff --git 
a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java index 83cb189415f7e..59e0b955d1cff 100644 --- a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java @@ -41,7 +41,6 @@ import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.repositories.blobstore.MeteredBlobStoreRepository; -import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.test.ClusterServiceUtils; @@ -454,9 +453,10 @@ public void deleteSnapshots( Collection snapshotIds, long repositoryDataGeneration, IndexVersion minimumNodeVersion, - SnapshotDeleteListener listener + ActionListener repositoryDataUpdateListener, + Runnable onCompletion ) { - listener.onFailure(new UnsupportedOperationException()); + repositoryDataUpdateListener.onFailure(new UnsupportedOperationException()); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java b/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java index 26e887338158d..92ce7e083df3e 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java @@ -27,7 +27,6 @@ import org.elasticsearch.repositories.ShardGenerations; import org.elasticsearch.repositories.ShardSnapshotResult; import org.elasticsearch.repositories.SnapshotShardContext; -import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; @@ -110,9 +109,10 @@ public void 
deleteSnapshots( Collection snapshotIds, long repositoryDataGeneration, IndexVersion minimumNodeVersion, - SnapshotDeleteListener listener + ActionListener repositoryDataUpdateListener, + Runnable onCompletion ) { - listener.onFailure(new UnsupportedOperationException()); + repositoryDataUpdateListener.onFailure(new UnsupportedOperationException()); } @Override diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java index d5a6e3c7e65c8..97e3a409d590d 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java @@ -82,7 +82,6 @@ import org.elasticsearch.repositories.SnapshotShardContext; import org.elasticsearch.repositories.blobstore.FileRestoreContext; import org.elasticsearch.snapshots.Snapshot; -import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotException; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; @@ -371,9 +370,10 @@ public void deleteSnapshots( Collection snapshotIds, long repositoryDataGeneration, IndexVersion minimumNodeVersion, - SnapshotDeleteListener listener + ActionListener repositoryDataUpdateListener, + Runnable onCompletion ) { - listener.onFailure(new UnsupportedOperationException("Unsupported for repository of type: " + TYPE)); + repositoryDataUpdateListener.onFailure(new UnsupportedOperationException("Unsupported for repository of type: " + TYPE)); } @Override From 6f3fab974998e0aedcd8eefbf20544890fcdd068 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Tue, 20 Aug 2024 11:39:35 +0300 Subject: [PATCH 20/20] Check for valid parentDoc before retrieving its previous (#112005) #111943 unveiled a bug in 
`collectChilder` where we attempt to collect the previous doc of the parent, even when the parent doc has no previous doc. Fixes #111990, #111991, #111992, #111993 --- docs/changelog/112005.yaml | 6 ++++++ .../org/elasticsearch/index/mapper/NestedObjectMapper.java | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/112005.yaml diff --git a/docs/changelog/112005.yaml b/docs/changelog/112005.yaml new file mode 100644 index 0000000000000..2d84381e632b3 --- /dev/null +++ b/docs/changelog/112005.yaml @@ -0,0 +1,6 @@ +pr: 112005 +summary: Check for valid `parentDoc` before retrieving its previous +area: Mapping +type: bug +issues: + - 111990 diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java index 23bdd0f559206..f3c438adcea09 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java @@ -441,7 +441,7 @@ public DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf private List collectChildren(int parentDoc, BitSet parentDocs, DocIdSetIterator childIt) throws IOException { assert parentDocs.get(parentDoc) : "wrong context, doc " + parentDoc + " is not a parent of " + nestedTypePath; - final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1); + final int prevParentDoc = parentDoc > 0 ? parentDocs.prevSetBit(parentDoc - 1) : -1; int childDocId = childIt.docID(); if (childDocId <= prevParentDoc) { childDocId = childIt.advance(prevParentDoc + 1);