diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command
index 3a8f3e09e847e..9aa36e7464f01 100644
--- a/.buildkite/hooks/pre-command
+++ b/.buildkite/hooks/pre-command
@@ -58,3 +58,14 @@ if [[ "${USE_LUCENE_SNAPSHOT_CREDS:-}" == "true" ]]; then
   unset data
 fi
+
+if [[ "${USE_DRA_CREDENTIALS:-}" == "true" ]]; then
+  DRA_VAULT_ROLE_ID_SECRET=$(vault read -field=role-id secret/ci/elastic-elasticsearch/legacy-vault-credentials)
+  export DRA_VAULT_ROLE_ID_SECRET
+
+  DRA_VAULT_SECRET_ID_SECRET=$(vault read -field=secret-id secret/ci/elastic-elasticsearch/legacy-vault-credentials)
+  export DRA_VAULT_SECRET_ID_SECRET
+
+  DRA_VAULT_ADDR=https://secrets.elastic.co:8200
+  export DRA_VAULT_ADDR
+fi
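Note on the hunk above: the three DRA_VAULT_* variables are AppRole credentials for the legacy Vault at secrets.elastic.co. A minimal sketch of how a downstream release script might consume them (the login call is the standard Vault AppRole pattern; the actual consumer of these variables is not part of this diff):

    #!/bin/bash
    # Sketch only: authenticate against the legacy Vault using the
    # AppRole credentials exported by the pre-command hook above.
    export VAULT_ADDR="$DRA_VAULT_ADDR"
    VAULT_TOKEN=$(vault write -field=token auth/approle/login \
      role_id="$DRA_VAULT_ROLE_ID_SECRET" \
      secret_id="$DRA_VAULT_SECRET_ID_SECRET")
    export VAULT_TOKEN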
diff --git a/.buildkite/pipelines/dra-workflow.yml b/.buildkite/pipelines/dra-workflow.yml
new file mode 100644
index 0000000000000..336bb74041be3
--- /dev/null
+++ b/.buildkite/pipelines/dra-workflow.yml
@@ -0,0 +1,9 @@
+steps:
+  - command: .buildkite/scripts/dra-workflow.sh
+    env:
+      USE_DRA_CREDENTIALS: "true"
+    agents:
+      provider: gcp
+      image: family/elasticsearch-ubuntu-2204
+      machineType: custom-32-98304
+      buildDirectory: /dev/shm/bk
diff --git a/.buildkite/pipelines/intake.template.yml b/.buildkite/pipelines/intake.template.yml
new file mode 100644
index 0000000000000..8a9c153da4e0d
--- /dev/null
+++ b/.buildkite/pipelines/intake.template.yml
@@ -0,0 +1,66 @@
+steps:
+  - label: sanity-check
+    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files precommit
+    timeout_in_minutes: 300
+    agents:
+      provider: gcp
+      image: family/elasticsearch-ubuntu-2004
+      machineType: custom-32-98304
+      buildDirectory: /dev/shm/bk
+  - wait
+  - label: part1
+    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart1
+    timeout_in_minutes: 300
+    agents:
+      provider: gcp
+      image: family/elasticsearch-ubuntu-2004
+      machineType: custom-32-98304
+      buildDirectory: /dev/shm/bk
+  - label: part2
+    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart2
+    timeout_in_minutes: 300
+    agents:
+      provider: gcp
+      image: family/elasticsearch-ubuntu-2004
+      machineType: custom-32-98304
+      buildDirectory: /dev/shm/bk
+  - label: part3
+    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart3
+    timeout_in_minutes: 300
+    agents:
+      provider: gcp
+      image: family/elasticsearch-ubuntu-2004
+      machineType: custom-32-98304
+      buildDirectory: /dev/shm/bk
+  - group: bwc-snapshots
+    steps:
+      - label: "{{matrix.BWC_VERSION}} / bwc-snapshots"
+        command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files v$$BWC_VERSION#bwcTest
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            BWC_VERSION: $BWC_LIST
+        agents:
+          provider: gcp
+          image: family/elasticsearch-ubuntu-2004
+          machineType: custom-32-98304
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: "{{matrix.BWC_VERSION}}"
+  - label: rest-compat
+    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkRestCompat
+    timeout_in_minutes: 300
+    agents:
+      provider: gcp
+      image: family/elasticsearch-ubuntu-2004
+      machineType: custom-32-98304
+      buildDirectory: /dev/shm/bk
+  - wait
+  - trigger: elasticsearch-dra-workflow
+    label: Trigger DRA snapshot workflow
+    async: true
+    build:
+      branch: "$BUILDKITE_BRANCH"
+      commit: "$BUILDKITE_COMMIT"
+      env:
+        DRA_WORKFLOW: snapshot
diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml
new file mode 100644
index 0000000000000..fe9f9553649c9
--- /dev/null
+++ b/.buildkite/pipelines/intake.yml
@@ -0,0 +1,67 @@
+# This file is auto-generated. See .buildkite/pipelines/intake.template.yml
+steps:
+  - label: sanity-check
+    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files precommit
+    timeout_in_minutes: 300
+    agents:
+      provider: gcp
+      image: family/elasticsearch-ubuntu-2004
+      machineType: custom-32-98304
+      buildDirectory: /dev/shm/bk
+  - wait
+  - label: part1
+    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart1
+    timeout_in_minutes: 300
+    agents:
+      provider: gcp
+      image: family/elasticsearch-ubuntu-2004
+      machineType: custom-32-98304
+      buildDirectory: /dev/shm/bk
+  - label: part2
+    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart2
+    timeout_in_minutes: 300
+    agents:
+      provider: gcp
+      image: family/elasticsearch-ubuntu-2004
+      machineType: custom-32-98304
+      buildDirectory: /dev/shm/bk
+  - label: part3
+    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart3
+    timeout_in_minutes: 300
+    agents:
+      provider: gcp
+      image: family/elasticsearch-ubuntu-2004
+      machineType: custom-32-98304
+      buildDirectory: /dev/shm/bk
+  - group: bwc-snapshots
+    steps:
+      - label: "{{matrix.BWC_VERSION}} / bwc-snapshots"
+        command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files v$$BWC_VERSION#bwcTest
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            BWC_VERSION: ["7.17.13", "8.9.2", "8.10.0", "8.11.0"]
+        agents:
+          provider: gcp
+          image: family/elasticsearch-ubuntu-2004
+          machineType: custom-32-98304
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: "{{matrix.BWC_VERSION}}"
+  - label: rest-compat
+    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkRestCompat
+    timeout_in_minutes: 300
+    agents:
+      provider: gcp
+      image: family/elasticsearch-ubuntu-2004
+      machineType: custom-32-98304
+      buildDirectory: /dev/shm/bk
+  - wait
+  - trigger: elasticsearch-dra-workflow
+    label: Trigger DRA snapshot workflow
+    async: true
+    build:
+      branch: "$BUILDKITE_BRANCH"
+      commit: "$BUILDKITE_COMMIT"
+      env:
+        DRA_WORKFLOW: snapshot
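intake.yml above is the expanded form of intake.template.yml: the only differences are the auto-generated header comment and the concrete version list substituted for $BWC_LIST. A hypothetical sketch of that expansion step (the real generator script is not part of this diff; envsubst is the GNU gettext utility, and the version list shown is taken from the generated file above):

    #!/bin/bash
    # Hypothetical sketch: render intake.yml from its template by
    # substituting only the $BWC_LIST placeholder.
    export BWC_LIST='["7.17.13", "8.9.2", "8.10.0", "8.11.0"]'
    envsubst '$BWC_LIST' \
      < .buildkite/pipelines/intake.template.yml \
      > .buildkite/pipelines/intake.yml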
diff --git a/.buildkite/pipelines/periodic-packaging.bwc.template.yml b/.buildkite/pipelines/periodic-packaging.bwc.template.yml
new file mode 100644
index 0000000000000..0ec7721381d07
--- /dev/null
+++ b/.buildkite/pipelines/periodic-packaging.bwc.template.yml
@@ -0,0 +1,15 @@
+  - label: "{{matrix.image}} / $BWC_VERSION / packaging-tests-upgrade"
+    command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v$BWC_VERSION
+    timeout_in_minutes: 300
+    matrix:
+      setup:
+        image:
+          - rocky-8
+          - ubuntu-2004
+    agents:
+      provider: gcp
+      image: family/elasticsearch-{{matrix.image}}
+      machineType: custom-16-32768
+      buildDirectory: /dev/shm/bk
+    env:
+      BWC_VERSION: $BWC_VERSION
diff --git a/.buildkite/pipelines/periodic-packaging.template.yml b/.buildkite/pipelines/periodic-packaging.template.yml
new file mode 100644
index 0000000000000..1f1852639e997
--- /dev/null
+++ b/.buildkite/pipelines/periodic-packaging.template.yml
@@ -0,0 +1,52 @@
+steps:
+  - group: packaging-tests-unix
+    steps:
+      - label: "{{matrix.image}} / packaging-tests-unix"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ destructivePackagingTest
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - centos-7
+              - debian-10
+              - debian-11
+              - opensuse-leap-15
+              - oraclelinux-7
+              - oraclelinux-8
+              - sles-12
+              - sles-15
+              - ubuntu-1804
+              - ubuntu-2004
+              - ubuntu-2204
+              - rocky-8
+              - rhel-7
+              - rhel-8
+              - rhel-9
+              - almalinux-8
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          diskSizeGb: 350
+          machineType: custom-16-32768
+        env: {}
+  - group: packaging-tests-upgrade
+    steps: $BWC_STEPS
+  - group: packaging-tests-windows
+    steps:
+      - label: "{{matrix.image}} / packaging-tests-windows"
+        command: |
+          .\.buildkite\scripts\run-script.ps1 .\.ci\scripts\packaging-test.ps1
+        timeout_in_minutes: 180
+        matrix:
+          setup:
+            image:
+              - windows-2016
+              - windows-2019
+              - windows-2022
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-32-98304
+          diskType: pd-ssd
+          diskSizeGb: 350
+        env: {}
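The $BWC_STEPS placeholder in the template above is filled by rendering periodic-packaging.bwc.template.yml once per backwards-compatibility version, producing the long generated file below. A hypothetical sketch of that expansion (both the generator and the version list used here are assumptions for illustration, not part of this diff):

    #!/bin/bash
    # Hypothetical sketch: render one upgrade step per BWC version,
    # then splice the accumulated steps into the packaging template.
    BWC_STEPS=""
    for v in 7.17.13 8.9.2 8.10.0 8.11.0; do
      BWC_STEPS+="$(BWC_VERSION="$v" envsubst '$BWC_VERSION' \
        < .buildkite/pipelines/periodic-packaging.bwc.template.yml)"$'\n'
    done
    export BWC_STEPS
    envsubst '$BWC_STEPS' \
      < .buildkite/pipelines/periodic-packaging.template.yml \
      > .buildkite/pipelines/periodic-packaging.yml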
diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml
new file mode 100644
index 0000000000000..2271cb8889570
--- /dev/null
+++ b/.buildkite/pipelines/periodic-packaging.yml
@@ -0,0 +1,1621 @@
+# This file is auto-generated. See .buildkite/pipelines/periodic-packaging.template.yml
+steps:
+  - group: packaging-tests-unix
+    steps:
+      - label: "{{matrix.image}} / packaging-tests-unix"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ destructivePackagingTest
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - centos-7
+              - debian-10
+              - debian-11
+              - opensuse-leap-15
+              - oraclelinux-7
+              - oraclelinux-8
+              - sles-12
+              - sles-15
+              - ubuntu-1804
+              - ubuntu-2004
+              - ubuntu-2204
+              - rocky-8
+              - rhel-7
+              - rhel-8
+              - rhel-9
+              - almalinux-8
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          diskSizeGb: 350
+          machineType: custom-16-32768
+        env: {}
+  - group: packaging-tests-upgrade
+    steps:
+      - label: "{{matrix.image}} / 7.0.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.0.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.0.0
+
+      - label: "{{matrix.image}} / 7.0.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.0.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.0.1
+
+      - label: "{{matrix.image}} / 7.1.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.1.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.1.0
+
+      - label: "{{matrix.image}} / 7.1.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.1.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.1.1
+
+      - label: "{{matrix.image}} / 7.2.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.2.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.2.0
+
+      - label: "{{matrix.image}} / 7.2.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.2.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.2.1
+
+      - label: "{{matrix.image}} / 7.3.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.3.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.3.0
+
+      - label: "{{matrix.image}} / 7.3.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.3.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.3.1
+
+      - label: "{{matrix.image}} / 7.3.2 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.3.2
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.3.2
+
+      - label: "{{matrix.image}} / 7.4.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.4.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.4.0
+
+      - label: "{{matrix.image}} / 7.4.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.4.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.4.1
+
+      - label: "{{matrix.image}} / 7.4.2 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.4.2
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.4.2
+
+      - label: "{{matrix.image}} / 7.5.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.5.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.5.0
+
+      - label: "{{matrix.image}} / 7.5.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.5.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.5.1
+
+      - label: "{{matrix.image}} / 7.5.2 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.5.2
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.5.2
+
+      - label: "{{matrix.image}} / 7.6.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.6.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.6.0
+
+      - label: "{{matrix.image}} / 7.6.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.6.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.6.1
+
+      - label: "{{matrix.image}} / 7.6.2 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.6.2
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.6.2
+
+      - label: "{{matrix.image}} / 7.7.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.7.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.7.0
+
+      - label: "{{matrix.image}} / 7.7.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.7.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.7.1
+
+      - label: "{{matrix.image}} / 7.8.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.8.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.8.0
+
+      - label: "{{matrix.image}} / 7.8.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.8.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.8.1
+
+      - label: "{{matrix.image}} / 7.9.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.9.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.9.0
+
+      - label: "{{matrix.image}} / 7.9.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.9.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.9.1
+
+      - label: "{{matrix.image}} / 7.9.2 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.9.2
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.9.2
+
+      - label: "{{matrix.image}} / 7.9.3 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.9.3
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.9.3
+
+      - label: "{{matrix.image}} / 7.10.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.10.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.10.0
+
+      - label: "{{matrix.image}} / 7.10.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.10.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.10.1
+
+      - label: "{{matrix.image}} / 7.10.2 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.10.2
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.10.2
+
+      - label: "{{matrix.image}} / 7.11.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.11.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.11.0
+
+      - label: "{{matrix.image}} / 7.11.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.11.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.11.1
+
+      - label: "{{matrix.image}} / 7.11.2 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.11.2
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.11.2
+
+      - label: "{{matrix.image}} / 7.12.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.12.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.12.0
+
+      - label: "{{matrix.image}} / 7.12.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.12.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.12.1
+
+      - label: "{{matrix.image}} / 7.13.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.13.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.13.0
+
+      - label: "{{matrix.image}} / 7.13.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.13.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.13.1
+
+      - label: "{{matrix.image}} / 7.13.2 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.13.2
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.13.2
+
+      - label: "{{matrix.image}} / 7.13.3 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.13.3
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.13.3
+
+      - label: "{{matrix.image}} / 7.13.4 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.13.4
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.13.4
+
+      - label: "{{matrix.image}} / 7.14.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.14.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.14.0
+
+      - label: "{{matrix.image}} / 7.14.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.14.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.14.1
+
+      - label: "{{matrix.image}} / 7.14.2 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.14.2
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.14.2
+
+      - label: "{{matrix.image}} / 7.15.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.15.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.15.0
+
+      - label: "{{matrix.image}} / 7.15.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.15.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.15.1
+
+      - label: "{{matrix.image}} / 7.15.2 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.15.2
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.15.2
+
+      - label: "{{matrix.image}} / 7.16.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.16.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.16.0
+
+      - label: "{{matrix.image}} / 7.16.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.16.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.16.1
+
+      - label: "{{matrix.image}} / 7.16.2 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.16.2
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.16.2
+
+      - label: "{{matrix.image}} / 7.16.3 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.16.3
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.16.3
+
+      - label: "{{matrix.image}} / 7.17.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.17.0
+
+      - label: "{{matrix.image}} / 7.17.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.17.1
+
+      - label: "{{matrix.image}} / 7.17.2 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.2
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.17.2
+
+      - label: "{{matrix.image}} / 7.17.3 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.3
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.17.3
+
+      - label: "{{matrix.image}} / 7.17.4 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.4
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.17.4
+
+      - label: "{{matrix.image}} / 7.17.5 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.5
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.17.5
+
+      - label: "{{matrix.image}} / 7.17.6 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.6
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.17.6
+
+      - label: "{{matrix.image}} / 7.17.7 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.7
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.17.7
+
+      - label: "{{matrix.image}} / 7.17.8 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.8
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.17.8
+
+      - label: "{{matrix.image}} / 7.17.9 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.9
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.17.9
+
+      - label: "{{matrix.image}} / 7.17.10 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.10
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.17.10
+
+      - label: "{{matrix.image}} / 7.17.11 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.11
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.17.11
+
+      - label: "{{matrix.image}} / 7.17.12 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.12
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.17.12
+
+      - label: "{{matrix.image}} / 7.17.13 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.13
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 7.17.13
+
+      - label: "{{matrix.image}} / 8.0.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.0.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.0.0
+
+      - label: "{{matrix.image}} / 8.0.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.0.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.0.1
+
+      - label: "{{matrix.image}} / 8.1.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.1.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.1.0
+
+      - label: "{{matrix.image}} / 8.1.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.1.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.1.1
+
+      - label: "{{matrix.image}} / 8.1.2 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.1.2
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.1.2
+
+      - label: "{{matrix.image}} / 8.1.3 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.1.3
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.1.3
+
+      - label: "{{matrix.image}} / 8.2.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.2.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.2.0
+
+      - label: "{{matrix.image}} / 8.2.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.2.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.2.1
+
+      - label: "{{matrix.image}} / 8.2.2 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.2.2
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.2.2
+
+      - label: "{{matrix.image}} / 8.2.3 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.2.3
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.2.3
+
+      - label: "{{matrix.image}} / 8.3.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.3.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.3.0
+
+      - label: "{{matrix.image}} / 8.3.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.3.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.3.1
+
+      - label: "{{matrix.image}} / 8.3.2 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.3.2
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.3.2
+
+      - label: "{{matrix.image}} / 8.3.3 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.3.3
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.3.3
+
+      - label: "{{matrix.image}} / 8.4.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.4.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.4.0
+
+      - label: "{{matrix.image}} / 8.4.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.4.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.4.1
+
+      - label: "{{matrix.image}} / 8.4.2 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.4.2
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.4.2
+
+      - label: "{{matrix.image}} / 8.4.3 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.4.3
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.4.3
+
+      - label: "{{matrix.image}} / 8.5.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.5.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.5.0
+
+      - label: "{{matrix.image}} / 8.5.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.5.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.5.1
+
+      - label: "{{matrix.image}} / 8.5.2 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.5.2
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.5.2
+
+      - label: "{{matrix.image}} / 8.5.3 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.5.3
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.5.3
+
+      - label: "{{matrix.image}} / 8.6.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.6.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.6.0
+
+      - label: "{{matrix.image}} / 8.6.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.6.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.6.1
+
+      - label: "{{matrix.image}} / 8.6.2 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.6.2
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.6.2
+
+      - label: "{{matrix.image}} / 8.7.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.7.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.7.0
+
+      - label: "{{matrix.image}} / 8.7.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.7.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.7.1
+
+      - label: "{{matrix.image}} / 8.8.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.8.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.8.0
+
+      - label: "{{matrix.image}} / 8.8.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.8.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.8.1
+
+      - label: "{{matrix.image}} / 8.8.2 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.8.2
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.8.2
+
+      - label: "{{matrix.image}} / 8.9.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.9.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.9.0
+
+      - label: "{{matrix.image}} / 8.9.1 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.9.1
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.9.1
+
+      - label: "{{matrix.image}} / 8.9.2 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.9.2
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.9.2
+
+      - label: "{{matrix.image}} / 8.10.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.10.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.10.0
+
+      - label: "{{matrix.image}} / 8.11.0 / packaging-tests-upgrade"
+        command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.11.0
+        timeout_in_minutes: 300
+        matrix:
+          setup:
+            image:
+              - rocky-8
+              - ubuntu-2004
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-16-32768
+          buildDirectory: /dev/shm/bk
+        env:
+          BWC_VERSION: 8.11.0
+
+  - group: packaging-tests-windows
+    steps:
+      - label: "{{matrix.image}} / packaging-tests-windows"
+        command: |
+          .\.buildkite\scripts\run-script.ps1 .\.ci\scripts\packaging-test.ps1
+        timeout_in_minutes: 180
+        matrix:
+          setup:
+            image:
+              - windows-2016
+              - windows-2019
+              - windows-2022
+        agents:
+          provider: gcp
+          image: family/elasticsearch-{{matrix.image}}
+          machineType: custom-32-98304
+          diskType: pd-ssd
+          diskSizeGb: 350
+        env: {}
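Because each generated step above is self-contained, a single version/OS combination can be reproduced outside Buildkite by exporting the same environment and running the same command, for example (a sketch; assumes a local checkout with the referenced scripts on a supported Linux image):

    # Reproduce one generated upgrade step locally.
    export BWC_VERSION=8.9.2
    ./.ci/scripts/packaging-test.sh --build-cache \
      -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ \
      -Dbwc.checkout.align=true "destructiveDistroUpgradeTest.v$BWC_VERSION"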
agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.9.2 + + - label: "{{matrix.image}} / 8.10.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.10.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.10.0 + + - label: "{{matrix.image}} / 8.11.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.11.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.11.0 + + - group: packaging-tests-windows + steps: + - label: "{{matrix.image}} / packaging-tests-windows" + command: | + .\.buildkite\scripts\run-script.ps1 .\.ci\scripts\packaging-test.ps1 + timeout_in_minutes: 180 + matrix: + setup: + image: + - windows-2016 + - windows-2019 + - windows-2022 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-32-98304 + diskType: pd-ssd + diskSizeGb: 350 + env: {} diff --git a/.buildkite/pipelines/periodic-platform-support.yml b/.buildkite/pipelines/periodic-platform-support.yml new file mode 100644 index 0000000000000..520089286ec36 --- /dev/null +++ b/.buildkite/pipelines/periodic-platform-support.yml @@ -0,0 +1,82 @@ +steps: + - group: platform-support-unix + steps: + - label: "{{matrix.image}} / platform-support-unix" + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true check + timeout_in_minutes: 420 + matrix: + setup: + image: + - centos-7 + - debian-10 + - debian-11 + - opensuse-leap-15 + - oraclelinux-7 + - oraclelinux-8 + - sles-12 + - sles-15 + - ubuntu-1804 + - ubuntu-2004 + - ubuntu-2204 + - rocky-8 + - rhel-7 + - rhel-8 + - rhel-9 + - almalinux-8 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + diskSizeGb: 350 + machineType: custom-32-98304 + env: {} + - group: platform-support-windows + steps: + - label: "{{matrix.image}} / {{matrix.GRADLE_TASK}} / platform-support-windows" + command: | + .\.buildkite\scripts\run-script.ps1 bash .buildkite/scripts/windows-run-gradle.sh + timeout_in_minutes: 420 + matrix: + setup: + image: + - windows-2016 + - windows-2019 + - windows-2022 + GRADLE_TASK: + - checkPart1 + - checkPart2 + - checkPart3 + - bwcTestSnapshots + - checkRestCompat + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-32-98304 + diskType: pd-ssd + diskSizeGb: 350 + env: + GRADLE_TASK: "{{matrix.GRADLE_TASK}}" + - group: platform-support-arm + steps: + - label: "{{matrix.image}} / {{matrix.GRADLE_TASK}} / platform-support-arm" + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true {{matrix.GRADLE_TASK}} + timeout_in_minutes: 420 + matrix: + setup: + image: + - almalinux-8-aarch64 + - ubuntu-2004-aarch64 + GRADLE_TASK: + - checkPart1 + - checkPart2 + - checkPart3 + - bwcTestSnapshots + - checkRestCompat + agents: + provider: aws + imagePrefix: 
elasticsearch-{{matrix.image}} + instanceType: m6g.8xlarge + diskSizeGb: 350 + diskType: gp3 + diskName: /dev/sda1 + env: + GRADLE_TASK: "{{matrix.GRADLE_TASK}}" diff --git a/.buildkite/pipelines/periodic.bwc.template.yml b/.buildkite/pipelines/periodic.bwc.template.yml new file mode 100644 index 0000000000000..8a8c43d75e3ef --- /dev/null +++ b/.buildkite/pipelines/periodic.bwc.template.yml @@ -0,0 +1,10 @@ + - label: $BWC_VERSION / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v$BWC_VERSION#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: $BWC_VERSION \ No newline at end of file diff --git a/.buildkite/pipelines/periodic.template.yml b/.buildkite/pipelines/periodic.template.yml new file mode 100644 index 0000000000000..3457af88c1f0b --- /dev/null +++ b/.buildkite/pipelines/periodic.template.yml @@ -0,0 +1,105 @@ +steps: + - group: bwc + steps: $BWC_STEPS + - label: concurrent-search-tests + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dtests.jvm.argline=-Des.concurrent_search=true -Des.concurrent_search=true check + timeout_in_minutes: 420 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + diskSizeGb: 350 + machineType: custom-32-98304 + - label: encryption-at-rest + command: .buildkite/scripts/encryption-at-rest.sh + timeout_in_minutes: 420 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + diskSizeGb: 350 + machineType: custom-32-98304 + - label: eql-correctness + command: .buildkite/scripts/eql-correctness.sh + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + - label: example-plugins + command: |- + cd $$WORKSPACE/plugins/examples + + $$WORKSPACE/.ci/scripts/run-gradle.sh build + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + - group: java-fips-matrix + steps: + - label: "{{matrix.ES_RUNTIME_JAVA}} / {{matrix.GRADLE_TASK}} / java-fips-matrix" + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dtests.fips.enabled=true $$GRADLE_TASK + timeout_in_minutes: 300 + matrix: + setup: + ES_RUNTIME_JAVA: + - openjdk17 + GRADLE_TASK: + - checkPart1 + - checkPart2 + - checkPart3 + - bwcTestSnapshots + - checkRestCompat + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" + GRADLE_TASK: "{{matrix.GRADLE_TASK}}" + - group: java-matrix + steps: + - label: "{{matrix.ES_RUNTIME_JAVA}} / {{matrix.GRADLE_TASK}} / java-matrix" + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true $$GRADLE_TASK + timeout_in_minutes: 300 + matrix: + setup: + ES_RUNTIME_JAVA: + - graalvm-ce17 + - openjdk17 + - openjdk18 + - openjdk19 + - openjdk20 + - openjdk21 + GRADLE_TASK: + - checkPart1 + - checkPart2 + - checkPart3 + - bwcTestSnapshots + - checkRestCompat + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" + GRADLE_TASK: "{{matrix.GRADLE_TASK}}" + - label: release-tests + command: .buildkite/scripts/release-tests.sh + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + diskSizeGb: 
350 + machineType: custom-32-98304 + - label: single-processor-node-tests + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dtests.configure_test_clusters_with_one_processor=true check + timeout_in_minutes: 420 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + diskSizeGb: 350 + machineType: custom-32-98304 diff --git a/.buildkite/pipelines/periodic.trigger.yml b/.buildkite/pipelines/periodic.trigger.yml deleted file mode 100644 index 5d5d592448b5d..0000000000000 --- a/.buildkite/pipelines/periodic.trigger.yml +++ /dev/null @@ -1,16 +0,0 @@ -steps: - - trigger: elasticsearch-periodic - label: Trigger periodic pipeline for main - async: true - build: - branch: main - - trigger: elasticsearch-periodic - label: Trigger periodic pipeline for 8.9 - async: true - build: - branch: "8.9" - - trigger: elasticsearch-periodic - label: Trigger periodic pipeline for 7.17 - async: true - build: - branch: "7.17" diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 95dba6e1d44f3..1601575feeffe 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -1,9 +1,1027 @@ +# This file is auto-generated. See .buildkite/pipelines/periodic.template.yml steps: + - group: bwc + steps: + - label: 7.0.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.0.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.0.0 + - label: 7.0.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.0.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.0.1 + - label: 7.1.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.1.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.1.0 + - label: 7.1.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.1.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.1.1 + - label: 7.2.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.2.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.2.0 + - label: 7.2.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.2.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.2.1 + - label: 7.3.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.3.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.3.0 + - label: 7.3.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.3.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.3.1 + - 
label: 7.3.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.3.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.3.2 + - label: 7.4.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.4.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.4.0 + - label: 7.4.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.4.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.4.1 + - label: 7.4.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.4.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.4.2 + - label: 7.5.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.5.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.5.0 + - label: 7.5.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.5.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.5.1 + - label: 7.5.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.5.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.5.2 + - label: 7.6.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.6.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.6.0 + - label: 7.6.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.6.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.6.1 + - label: 7.6.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.6.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.6.2 + - label: 7.7.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.7.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.7.0 + - label: 7.7.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.7.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.7.1 + - label: 7.8.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.8.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: 
gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.8.0 + - label: 7.8.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.8.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.8.1 + - label: 7.9.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.9.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.9.0 + - label: 7.9.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.9.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.9.1 + - label: 7.9.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.9.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.9.2 + - label: 7.9.3 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.9.3#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.9.3 + - label: 7.10.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.10.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.10.0 + - label: 7.10.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.10.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.10.1 + - label: 7.10.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.10.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.10.2 + - label: 7.11.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.11.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.11.0 + - label: 7.11.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.11.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.11.1 + - label: 7.11.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.11.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.11.2 + - label: 7.12.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.12.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + 
BWC_VERSION: 7.12.0 + - label: 7.12.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.12.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.12.1 + - label: 7.13.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.13.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.13.0 + - label: 7.13.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.13.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.13.1 + - label: 7.13.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.13.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.13.2 + - label: 7.13.3 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.13.3#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.13.3 + - label: 7.13.4 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.13.4#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.13.4 + - label: 7.14.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.14.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.14.0 + - label: 7.14.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.14.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.14.1 + - label: 7.14.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.14.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.14.2 + - label: 7.15.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.15.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.15.0 + - label: 7.15.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.15.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.15.1 + - label: 7.15.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.15.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.15.2 + - label: 7.16.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true 
v7.16.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.16.0 + - label: 7.16.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.16.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.16.1 + - label: 7.16.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.16.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.16.2 + - label: 7.16.3 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.16.3#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.16.3 + - label: 7.17.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.0 + - label: 7.17.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.1 + - label: 7.17.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.2 + - label: 7.17.3 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.3#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.3 + - label: 7.17.4 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.4#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.4 + - label: 7.17.5 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.5#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.5 + - label: 7.17.6 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.6#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.6 + - label: 7.17.7 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.7#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.7 + - label: 7.17.8 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.8#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 
+ machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.8 + - label: 7.17.9 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.9#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.9 + - label: 7.17.10 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.10#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.10 + - label: 7.17.11 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.11#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.11 + - label: 7.17.12 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.12#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.12 + - label: 7.17.13 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.13#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.13 + - label: 8.0.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.0.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.0.0 + - label: 8.0.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.0.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.0.1 + - label: 8.1.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.1.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.1.0 + - label: 8.1.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.1.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.1.1 + - label: 8.1.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.1.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.1.2 + - label: 8.1.3 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.1.3#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.1.3 + - label: 8.2.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.2.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.2.0 + - label: 8.2.1 / bwc + 
command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.2.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.2.1 + - label: 8.2.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.2.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.2.2 + - label: 8.2.3 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.2.3#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.2.3 + - label: 8.3.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.3.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.3.0 + - label: 8.3.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.3.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.3.1 + - label: 8.3.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.3.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.3.2 + - label: 8.3.3 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.3.3#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.3.3 + - label: 8.4.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.4.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.4.0 + - label: 8.4.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.4.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.4.1 + - label: 8.4.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.4.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.4.2 + - label: 8.4.3 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.4.3#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.4.3 + - label: 8.5.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.5.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.5.0 + - label: 8.5.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.5.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: 
family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.5.1 + - label: 8.5.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.5.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.5.2 + - label: 8.5.3 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.5.3#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.5.3 + - label: 8.6.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.6.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.6.0 + - label: 8.6.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.6.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.6.1 + - label: 8.6.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.6.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.6.2 + - label: 8.7.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.7.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.7.0 + - label: 8.7.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.7.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.7.1 + - label: 8.8.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.8.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.8.0 + - label: 8.8.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.8.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.8.1 + - label: 8.8.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.8.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.8.2 + - label: 8.9.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.9.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.9.0 + - label: 8.9.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.9.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.9.1 + - label: 8.9.2 / bwc + 
command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.9.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.9.2 + - label: 8.10.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.10.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.10.0 + - label: 8.11.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.11.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.11.0 + - label: concurrent-search-tests + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dtests.jvm.argline=-Des.concurrent_search=true -Des.concurrent_search=true check + timeout_in_minutes: 420 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + diskSizeGb: 350 + machineType: custom-32-98304 + - label: encryption-at-rest + command: .buildkite/scripts/encryption-at-rest.sh + timeout_in_minutes: 420 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + diskSizeGb: 350 + machineType: custom-32-98304 + - label: eql-correctness + command: .buildkite/scripts/eql-correctness.sh + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + - label: example-plugins + command: |- + cd $$WORKSPACE/plugins/examples + + $$WORKSPACE/.ci/scripts/run-gradle.sh build + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk - group: java-fips-matrix steps: - label: "{{matrix.ES_RUNTIME_JAVA}} / {{matrix.GRADLE_TASK}} / java-fips-matrix" command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dtests.fips.enabled=true $$GRADLE_TASK - timeout_in_minutes: 180 + timeout_in_minutes: 300 matrix: setup: ES_RUNTIME_JAVA: @@ -26,7 +1044,7 @@ steps: steps: - label: "{{matrix.ES_RUNTIME_JAVA}} / {{matrix.GRADLE_TASK}} / java-matrix" command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true $$GRADLE_TASK - timeout_in_minutes: 180 + timeout_in_minutes: 300 matrix: setup: ES_RUNTIME_JAVA: @@ -50,50 +1068,19 @@ steps: env: ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" GRADLE_TASK: "{{matrix.GRADLE_TASK}}" - - group: packaging-tests-windows - steps: - - label: "{{matrix.image}} / packaging-tests-windows" - command: | - .\.buildkite\scripts\run-script.ps1 .\.ci\scripts\packaging-test.ps1 - timeout_in_minutes: 180 - matrix: - setup: - image: - - windows-2016 - - windows-2019 - - windows-2022 - agents: - provider: gcp - image: family/brian-elasticsearch-{{matrix.image}} - imageProject: elastic-images-qa - machineType: custom-32-98304 - diskType: pd-ssd - diskSizeGb: 350 - env: {} - - group: platform-support-windows - steps: - - label: "{{matrix.image}} / {{matrix.GRADLE_TASK}} / platform-support-windows" - command: | - .\.buildkite\scripts\run-script.ps1 bash .buildkite/scripts/windows-run-gradle.sh - timeout_in_minutes: 360 - matrix: - setup: - image: - - windows-2016 - - windows-2019 - - windows-2022 - GRADLE_TASK: - - checkPart1 - - checkPart2 - - checkPart3 - - bwcTestSnapshots - - checkRestCompat - agents: - provider: gcp - image: 
family/brian-elasticsearch-{{matrix.image}} - imageProject: elastic-images-qa - machineType: custom-32-98304 - diskType: pd-ssd - diskSizeGb: 350 - env: - GRADLE_TASK: "{{matrix.GRADLE_TASK}}" + - label: release-tests + command: .buildkite/scripts/release-tests.sh + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + diskSizeGb: 350 + machineType: custom-32-98304 + - label: single-processor-node-tests + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dtests.configure_test_clusters_with_one_processor=true check + timeout_in_minutes: 420 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + diskSizeGb: 350 + machineType: custom-32-98304 diff --git a/.buildkite/scripts/branches.sh b/.buildkite/scripts/branches.sh new file mode 100755 index 0000000000000..886fa59e4d02c --- /dev/null +++ b/.buildkite/scripts/branches.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +# This determines which branches will have pipelines triggered periodically, for dra workflows. +BRANCHES=( $(cat branches.json | jq -r '.branches[].branch') ) diff --git a/.buildkite/scripts/dra-update-staging.sh b/.buildkite/scripts/dra-update-staging.sh new file mode 100755 index 0000000000000..676361bf1cfcf --- /dev/null +++ b/.buildkite/scripts/dra-update-staging.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +set -euo pipefail + +source .buildkite/scripts/branches.sh + +for BRANCH in "${BRANCHES[@]}"; do + # Don't publish main branch to staging + if [[ "$BRANCH" == "main" ]]; then + continue + fi + + echo "--- Checking $BRANCH" + + BEATS_MANIFEST=$(curl -sS "https://artifacts-staging.elastic.co/beats/latest/${BRANCH}.json" | jq -r '.manifest_url') + ML_MANIFEST=$(curl -sS "https://artifacts-staging.elastic.co/ml-cpp/latest/${BRANCH}.json" | jq -r '.manifest_url') + ES_MANIFEST=$(curl -sS "https://artifacts-staging.elastic.co/elasticsearch/latest/${BRANCH}.json" | jq -r '.manifest_url') + + ES_BEATS_DEPENDENCY=$(curl -sS "$ES_MANIFEST" | jq -r '.projects.elasticsearch.dependencies[] | select(.prefix == "beats") | .build_uri') + ES_ML_DEPENDENCY=$(curl -sS "$ES_MANIFEST" | jq -r '.projects.elasticsearch.dependencies[] | select(.prefix == "ml-cpp") | .build_uri') + + SHOULD_TRIGGER="" + + if [ "$BEATS_MANIFEST" = "$ES_BEATS_DEPENDENCY" ]; then + echo "ES has the latest beats" + else + echo "Need to trigger a build, $BEATS_MANIFEST available but ES has $ES_BEATS_DEPENDENCY" + SHOULD_TRIGGER=true + fi + + if [ "$ML_MANIFEST" = "$ES_ML_DEPENDENCY" ]; then + echo "ES has the latest ml-cpp" + else + echo "Need to trigger a build, $ML_MANIFEST available but ES has $ES_ML_DEPENDENCY" + SHOULD_TRIGGER=true + fi + + if [[ "$SHOULD_TRIGGER" == "true" ]]; then + echo "Triggering DRA staging workflow for $BRANCH" + cat << EOF | buildkite-agent pipeline upload +steps: + - trigger: elasticsearch-dra-workflow + label: Trigger DRA staging workflow for $BRANCH + async: true + build: + branch: "$BRANCH" + env: + DRA_WORKFLOW: staging +EOF + fi +done diff --git a/.buildkite/scripts/dra-workflow.sh b/.buildkite/scripts/dra-workflow.sh new file mode 100755 index 0000000000000..4611379009a08 --- /dev/null +++ b/.buildkite/scripts/dra-workflow.sh @@ -0,0 +1,90 @@ +#!/bin/bash + +set -euo pipefail + +WORKFLOW="${DRA_WORKFLOW:-snapshot}" +BRANCH="${BUILDKITE_BRANCH:-}" + +# Don't publish main branch to staging +if [[ "$BRANCH" == "main" && "$WORKFLOW" == "staging" ]]; then + exit 0 +fi + +echo --- Preparing + +# TODO move this to image +sudo apt-get update -y +sudo apt-get install -y libxml2-utils 
python3.10-venv + +RM_BRANCH="$BRANCH" +if [[ "$BRANCH" == "main" ]]; then + RM_BRANCH=master +fi + +ES_VERSION=$(grep elasticsearch build-tools-internal/version.properties | sed "s/elasticsearch *= *//g") + +VERSION_SUFFIX="" +if [[ "$WORKFLOW" == "snapshot" ]]; then + VERSION_SUFFIX="-SNAPSHOT" +fi + +BEATS_BUILD_ID="$(./.ci/scripts/resolve-dra-manifest.sh beats "$RM_BRANCH" "$ES_VERSION" "$WORKFLOW")" +ML_CPP_BUILD_ID="$(./.ci/scripts/resolve-dra-manifest.sh ml-cpp "$RM_BRANCH" "$ES_VERSION" "$WORKFLOW")" + +LICENSE_KEY_ARG="" +BUILD_SNAPSHOT_ARG="" + +if [[ "$WORKFLOW" == "staging" ]]; then + LICENSE_KEY=$(mktemp -d)/license.key + # Notice that only the public key is being read here, which isn't really secret + vault read -field pubkey secret/ci/elastic-elasticsearch/migrated/license | base64 --decode > "$LICENSE_KEY" + LICENSE_KEY_ARG="-Dlicense.key=$LICENSE_KEY" + + BUILD_SNAPSHOT_ARG="-Dbuild.snapshot=false" +fi + +echo --- Building release artifacts + +.ci/scripts/run-gradle.sh -Ddra.artifacts=true \ + -Ddra.artifacts.dependency.beats="${BEATS_BUILD_ID}" \ + -Ddra.artifacts.dependency.ml-cpp="${ML_CPP_BUILD_ID}" \ + -Ddra.workflow="$WORKFLOW" \ + -Dcsv="$WORKSPACE/build/distributions/dependencies-${ES_VERSION}${VERSION_SUFFIX}.csv" \ + $LICENSE_KEY_ARG \ + $BUILD_SNAPSHOT_ARG \ + buildReleaseArtifacts \ + exportCompressedDockerImages \ + :distribution:generateDependenciesReport + +PATH="$PATH:${JAVA_HOME}/bin" # Required by the following script +x-pack/plugin/sql/connectors/tableau/package.sh asm qualifier="$VERSION_SUFFIX" + +# we regenerate this file as part of the release manager invocation +rm "build/distributions/elasticsearch-jdbc-${ES_VERSION}${VERSION_SUFFIX}.taco.sha512" + +# Allow other users access to read the artifacts so they are readable in the +# container +find "$WORKSPACE" -type f -path "*/build/distributions/*" -exec chmod a+r {} \; + +# Allow other users write access to create checksum files +find "$WORKSPACE" -type d -path "*/build/distributions" -exec chmod a+w {} \; + +echo --- Running release-manager + +# Artifacts should be generated +docker run --rm \ + --name release-manager \ + -e VAULT_ADDR="$DRA_VAULT_ADDR" \ + -e VAULT_ROLE_ID="$DRA_VAULT_ROLE_ID_SECRET" \ + -e VAULT_SECRET_ID="$DRA_VAULT_SECRET_ID_SECRET" \ + --mount type=bind,readonly=false,src="$PWD",target=/artifacts \ + docker.elastic.co/infra/release-manager:latest \ + cli collect \ + --project elasticsearch \ + --branch "$RM_BRANCH" \ + --commit "$BUILDKITE_COMMIT" \ + --workflow "$WORKFLOW" \ + --version "$ES_VERSION" \ + --artifact-set main \ + --dependency "beats:https://artifacts-${WORKFLOW}.elastic.co/beats/${BEATS_BUILD_ID}/manifest-${ES_VERSION}${VERSION_SUFFIX}.json" \ + --dependency "ml-cpp:https://artifacts-${WORKFLOW}.elastic.co/ml-cpp/${ML_CPP_BUILD_ID}/manifest-${ES_VERSION}${VERSION_SUFFIX}.json" diff --git a/.buildkite/scripts/dra-workflow.trigger.sh b/.buildkite/scripts/dra-workflow.trigger.sh new file mode 100755 index 0000000000000..5ef756c30bccc --- /dev/null +++ b/.buildkite/scripts/dra-workflow.trigger.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +set -euo pipefail + +echo "steps:" + +source .buildkite/scripts/branches.sh + +for BRANCH in "${BRANCHES[@]}"; do + if [[ "$BRANCH" == "main" ]]; then + continue + fi + + INTAKE_PIPELINE_SLUG="elasticsearch-intake" + BUILD_JSON=$(curl -sH "Authorization: Bearer ${BUILDKITE_API_TOKEN}" "https://api.buildkite.com/v2/organizations/elastic/pipelines/${INTAKE_PIPELINE_SLUG}/builds?branch=${BRANCH}&state=passed&per_page=1" | jq '.[0] | {commit: 
.commit, url: .web_url}') + LAST_GOOD_COMMIT=$(echo "${BUILD_JSON}" | jq -r '.commit') + + cat < "${eql_test_credentials_file}" + +.ci/scripts/run-gradle.sh -Dignore.tests.seed :x-pack:plugin:eql:qa:correctness:check diff --git a/.buildkite/scripts/periodic.trigger.sh b/.buildkite/scripts/periodic.trigger.sh new file mode 100644 index 0000000000000..3571d112c5b6d --- /dev/null +++ b/.buildkite/scripts/periodic.trigger.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +set -euo pipefail + +echo "steps:" + +source .buildkite/scripts/branches.sh + +for BRANCH in "${BRANCHES[@]}"; do + INTAKE_PIPELINE_SLUG="elasticsearch-intake" + BUILD_JSON=$(curl -sH "Authorization: Bearer ${BUILDKITE_API_TOKEN}" "https://api.buildkite.com/v2/organizations/elastic/pipelines/${INTAKE_PIPELINE_SLUG}/builds?branch=${BRANCH}&state=passed&per_page=1" | jq '.[0] | {commit: .commit, url: .web_url}') + LAST_GOOD_COMMIT=$(echo "${BUILD_JSON}" | jq -r '.commit') + + cat < - Map<String, TransportVersion> transportVersions = new HashMap<>(); + Map<String, CompatibilityVersions> compatibilityVersions = new HashMap<>(); for (int i = 1; i <= numNodes; i++) { String id = "node" + i; nb.add(Allocators.newNode(id, Collections.singletonMap("tag", "tag_" + (i % numTags)))); - transportVersions.put(id, TransportVersion.current()); + compatibilityVersions.put(id, new CompatibilityVersions(TransportVersion.current())); } initialClusterState = ClusterState.builder(ClusterName.DEFAULT) .metadata(metadata) .routingTable(routingTable) .nodes(nb) - .transportVersions(transportVersions) + .compatibilityVersions(compatibilityVersions) .build(); } diff --git a/build-tools-internal/src/main/resources/checkstyle_suppressions.xml b/build-tools-internal/src/main/resources/checkstyle_suppressions.xml index 6a12ee5b0403b..211faf973b772 100644 --- a/build-tools-internal/src/main/resources/checkstyle_suppressions.xml +++ b/build-tools-internal/src/main/resources/checkstyle_suppressions.xml @@ -30,8 +30,9 @@ - + + diff --git a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt index c22339be5332f..be3fea399a830 100644 --- a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt +++ b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt @@ -156,5 +156,5 @@ org.elasticsearch.cluster.service.ClusterService#submitUnbatchedStateUpdateTask( org.elasticsearch.cluster.ClusterStateTaskExecutor$TaskContext#success(java.util.function.Consumer) org.elasticsearch.cluster.ClusterStateTaskExecutor$TaskContext#success(java.util.function.Consumer, org.elasticsearch.cluster.ClusterStateAckListener) -@defaultMessage ClusterState#transportVersions are for internal use only. Use ClusterState#getMinTransportVersion or a different version. See TransportVersion javadocs for more info. -org.elasticsearch.cluster.ClusterState#transportVersions() +@defaultMessage ClusterState#compatibilityVersions are for internal use only. Use ClusterState#getMinVersions or a different version. See TransportVersion javadocs for more info. 
+org.elasticsearch.cluster.ClusterState#compatibilityVersions() diff --git a/build.gradle b/build.gradle index 3acd0fa6195eb..c33489c46b53c 100644 --- a/build.gradle +++ b/build.gradle @@ -72,9 +72,48 @@ tasks.register("updateCIBwcVersions") { file << " - \"$it\"\n" } } + + def writeBuildkiteList = { String outputFilePath, String pipelineTemplatePath, List versions -> + def outputFile = file(outputFilePath) + def pipelineTemplate = file(pipelineTemplatePath) + + def listString = "[" + versions.collect { "\"${it}\"" }.join(", ") + "]" + outputFile.text = "# This file is auto-generated. See ${pipelineTemplatePath}\n" + pipelineTemplate.text.replaceAll('\\$BWC_LIST', listString) + } + + def writeBuildkiteSteps = { String outputFilePath, String pipelineTemplatePath, String stepTemplatePath, List versions -> + def outputFile = file(outputFilePath) + def pipelineTemplate = file(pipelineTemplatePath) + def stepTemplate = file(stepTemplatePath) + + def steps = "" + versions.each { + steps += "\n" + stepTemplate.text.replaceAll('\\$BWC_VERSION', it.toString()) + } + + outputFile.text = "# This file is auto-generated. See ${pipelineTemplatePath}\n" + pipelineTemplate.text.replaceAll(' *\\$BWC_STEPS', steps) + } + doLast { writeVersions(file(".ci/bwcVersions"), BuildParams.bwcVersions.allIndexCompatible) writeVersions(file(".ci/snapshotBwcVersions"), BuildParams.bwcVersions.unreleasedIndexCompatible) + writeBuildkiteList( + ".buildkite/pipelines/intake.yml", + ".buildkite/pipelines/intake.template.yml", + BuildParams.bwcVersions.unreleasedIndexCompatible + ) + writeBuildkiteSteps( + ".buildkite/pipelines/periodic.yml", + ".buildkite/pipelines/periodic.template.yml", + ".buildkite/pipelines/periodic.bwc.template.yml", + BuildParams.bwcVersions.allIndexCompatible + ) + writeBuildkiteSteps( + ".buildkite/pipelines/periodic-packaging.yml", + ".buildkite/pipelines/periodic-packaging.template.yml", + ".buildkite/pipelines/periodic-packaging.bwc.template.yml", + BuildParams.bwcVersions.allIndexCompatible + ) } } diff --git a/catalog-info.yaml b/catalog-info.yaml index 2258cc11beeef..dd2f3c105026e 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -104,7 +104,7 @@ apiVersion: backstage.io/v1alpha1 kind: Resource metadata: name: buildkite-pipeline-elasticsearch-periodic-trigger - description: Triggers periodic pipeline for all required branches + description: Triggers periodic pipelines for all required branches links: - title: Pipeline url: https://buildkite.com/elastic/elasticsearch-periodic-trigger @@ -116,11 +116,11 @@ spec: apiVersion: buildkite.elastic.dev/v1 kind: Pipeline metadata: - description: ":elasticsearch: Triggers periodic pipeline for all required branches" + description: ":elasticsearch: Triggers periodic pipelines for all required branches" name: elasticsearch / periodic / trigger spec: repository: elastic/elasticsearch - pipeline_file: .buildkite/pipelines/periodic.trigger.yml + pipeline_file: .buildkite/scripts/periodic.trigger.sh branch_configuration: main teams: elasticsearch-team: {} diff --git a/distribution/build.gradle b/distribution/build.gradle index 2a7ad283a81c6..90af1472deb2e 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -411,7 +411,9 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { if (testDistro) { from buildServerNoticeTaskProvider } else { - from buildDefaultNoticeTaskProvider + from (buildDefaultNoticeTaskProvider) { + fileMode = 0644 + } } } } diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc 
index 466dc74d19e8e..809e06d4a9cea 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -7,8 +7,10 @@ include::{docs-root}/shared/versions/stack/{source_branch}.asciidoc[] :jdk_major: 11 :build_type: tar -:docker-repo: docker.elastic.co/elasticsearch/elasticsearch -:docker-image: {docker-repo}:{version} +:docker-repo: docker.elastic.co/elasticsearch/elasticsearch +:docker-image: {docker-repo}:{version} +:kib-docker-repo: docker.elastic.co/kibana/kibana +:kib-docker-image: {kib-docker-repo}:{version} :plugin_url: https://artifacts.elastic.co/downloads/elasticsearch-plugins /////// diff --git a/docs/changelog/94607.yaml b/docs/changelog/94607.yaml new file mode 100644 index 0000000000000..eea9264ce90f9 --- /dev/null +++ b/docs/changelog/94607.yaml @@ -0,0 +1,18 @@ +pr: 94607 +summary: Use `IndexWriter.flushNextBuffer()` to reclaim memory from indexing buffers +area: Engine +type: enhancement +issues: [] +highlight: + title: Use `IndexWriter.flushNextBuffer()` to reclaim memory from indexing buffers + body: |- + Rather than forcing a refresh to reclaim memory from indexing buffers, which flushes all + segments no matter how large, Elasticsearch now takes advantage of + `IndexWriter#flushNextBuffer` which only flushes the largest pending segment. This should smooth + out indexing, allowing for larger segment sizes, fewer merges and higher throughput. + + Furthermore, the selection algorithm to pick which shard to reclaim memory from next was + changed, from picking the shard that uses the most RAM to going over shards in a round-robin + fashion. This approach has proved to work significantly better in practice. + + notable: true diff --git a/docs/changelog/98360.yaml b/docs/changelog/98360.yaml new file mode 100644 index 0000000000000..b6b8696259c98 --- /dev/null +++ b/docs/changelog/98360.yaml @@ -0,0 +1,6 @@ +pr: 98360 +summary: Use a competitive iterator in `FiltersAggregator` +area: Aggregations +type: enhancement +issues: + - 97544 diff --git a/docs/changelog/98840.yaml b/docs/changelog/98840.yaml new file mode 100644 index 0000000000000..bb358916354dc --- /dev/null +++ b/docs/changelog/98840.yaml @@ -0,0 +1,6 @@ +pr: 98840 +summary: Don't ignore empty index templates that have no template definition +area: TSDB +type: bug +issues: + - 98834 diff --git a/docs/changelog/98996.yaml b/docs/changelog/98996.yaml new file mode 100644 index 0000000000000..1f1bdd35ff643 --- /dev/null +++ b/docs/changelog/98996.yaml @@ -0,0 +1,5 @@ +pr: 98996 +summary: Reintroduce `sparse_vector` mapping +area: Mapping +type: enhancement +issues: [] diff --git a/docs/changelog/99106.yaml b/docs/changelog/99106.yaml new file mode 100644 index 0000000000000..21cb121595d2b --- /dev/null +++ b/docs/changelog/99106.yaml @@ -0,0 +1,6 @@ +pr: 99106 +summary: "Add support for Persian language stemmer" +area: Analysis +type: feature +issues: + - 98911 diff --git a/docs/changelog/99188.yaml b/docs/changelog/99188.yaml new file mode 100644 index 0000000000000..c22e3ba4b36e5 --- /dev/null +++ b/docs/changelog/99188.yaml @@ -0,0 +1,6 @@ +pr: 99188 +summary: "ESQL: skip synthetic attributes when planning the physical fragment" +area: ES|QL +type: bug +issues: + - 99170 diff --git a/docs/changelog/99215.yaml b/docs/changelog/99215.yaml new file mode 100644 index 0000000000000..99227839b491e --- /dev/null +++ b/docs/changelog/99215.yaml @@ -0,0 +1,6 @@ +pr: 99215 +summary: Skip `DisiPriorityQueue` on single filter agg +area: Aggregations +type: enhancement +issues: + - 99202 diff --git a/docs/changelog/99222.yaml 
b/docs/changelog/99222.yaml new file mode 100644 index 0000000000000..025c5e01d2a53 --- /dev/null +++ b/docs/changelog/99222.yaml @@ -0,0 +1,5 @@ +pr: 99222 +summary: Fork response-sending in `OpenPointInTimeAction` +area: Search +type: bug +issues: [] diff --git a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc index b88c5de0b8185..162164e12872d 100644 --- a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc @@ -214,6 +214,9 @@ Norwegian (Nynorsk):: {lucene-analysis-docs}/no/NorwegianLightStemmer.html[*`light_nynorsk`*], {lucene-analysis-docs}/no/NorwegianMinimalStemmer.html[`minimal_nynorsk`] +Persian:: +{lucene-analysis-docs}/fa/PersianStemmer.html[*`persian`*] + Portuguese:: https://dl.acm.org/citation.cfm?id=1141523&dl=ACM&coll=DL&CFID=179095584&CFTOKEN=80067181[*`light_portuguese`*], pass:macros[http://www.inf.ufrgs.br/~buriol/papers/Orengo_CLEF07.pdf[`minimal_portuguese`\]], diff --git a/docs/reference/modules/cluster/remote-clusters-api-key.asciidoc b/docs/reference/modules/cluster/remote-clusters-api-key.asciidoc index 8593bab0c865a..29fe2b0aaf35e 100644 --- a/docs/reference/modules/cluster/remote-clusters-api-key.asciidoc +++ b/docs/reference/modules/cluster/remote-clusters-api-key.asciidoc @@ -48,6 +48,8 @@ To add a remote cluster using API key authentication: . <> . <> +If you run into any issues, refer to <<remote-clusters-troubleshooting>>. + [[remote-clusters-prerequisites-api-key]] ==== Prerequisites diff --git a/docs/reference/modules/cluster/remote-clusters-cert.asciidoc b/docs/reference/modules/cluster/remote-clusters-cert.asciidoc index 60d6f1a186175..36dbde331f484 100644 --- a/docs/reference/modules/cluster/remote-clusters-cert.asciidoc +++ b/docs/reference/modules/cluster/remote-clusters-cert.asciidoc @@ -8,6 +8,8 @@ To add a remote cluster using TLS certificate authentication: . <> . <> +If you run into any issues, refer to <<remote-clusters-troubleshooting>>. + [[remote-clusters-prerequisites-cert]] ==== Prerequisites diff --git a/docs/reference/modules/cluster/remote-clusters-migration.asciidoc b/docs/reference/modules/cluster/remote-clusters-migration.asciidoc index 9bab49bffae18..9db7c4a0257ad 100644 --- a/docs/reference/modules/cluster/remote-clusters-migration.asciidoc +++ b/docs/reference/modules/cluster/remote-clusters-migration.asciidoc @@ -32,6 +32,8 @@ following these steps: . <> . <> +If you run into any issues, refer to <<remote-clusters-troubleshooting>>. + [[remote-clusters-migration-prerequisites]] ==== Prerequisites @@ -208,11 +210,10 @@ existing indices that were created from the auto-follow pattern. [[remote-clusters-migration-disable-cert]] ==== Disable certificate based authentication and authorization -//TODO: Add link to troubleshooting docs when they're published NOTE: Only proceed with this step if the migration has been proved successful on -the local cluster. If the migration is unsuccessful, either find out what the -problem is and attempt to fix it or <>. +the local cluster. If the migration is unsuccessful, either +<> or <>. Next, disable the certificate based connection. Optionally, you can also revoke the authorization. 
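A quick aside on the `persian` stemmer value registered in stemmer-tokenfilter.asciidoc above: a minimal smoke test could wire the new language value into a custom analyzer. This is a hypothetical sketch, not part of the patch; the index name `persian_docs` and the `localhost:9200` endpoint are illustrative assumptions.

```bash
# Sketch: create a test index whose analyzer uses the new `persian` stemmer
# language value. The index name and endpoint are hypothetical.
curl -X PUT "localhost:9200/persian_docs" -H 'Content-Type: application/json' -d'
{
  "settings": {
    "analysis": {
      "filter": {
        "persian_stem": { "type": "stemmer", "language": "persian" }
      },
      "analyzer": {
        "persian_stemmed": {
          "tokenizer": "standard",
          "filter": [ "lowercase", "persian_stem" ]
        }
      }
    }
  }
}'

# Run a sample string through the analyzer; the tokens in the response should
# reflect the stemmer's suffix stripping.
curl -X GET "localhost:9200/persian_docs/_analyze" -H 'Content-Type: application/json' -d'
{ "analyzer": "persian_stemmed", "text": "کتاب‌ها" }'
```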
diff --git a/docs/reference/modules/cluster/remote-clusters-remote-info.asciidoc b/docs/reference/modules/cluster/remote-clusters-remote-info.asciidoc new file mode 100644 index 0000000000000..c1ab215b66413 --- /dev/null +++ b/docs/reference/modules/cluster/remote-clusters-remote-info.asciidoc @@ -0,0 +1,32 @@ +[source,console] +---- +GET /_remote/info +---- +// TEST[skip:TODO] + +The API should return `"connected" : true`. When using +<>, it should also return +`"cluster_credentials": "::es_redacted::"`. + +[source,console-result,subs=attributes+] +---- +{ + "cluster_one" : { + "seeds" : [ + "127.0.0.1:9443" + ], + "connected" : true, <1> + "num_nodes_connected" : 1, + "max_connections_per_cluster" : 3, + "initial_connect_timeout" : "30s", + "skip_unavailable" : false, + "cluster_credentials": "::es_redacted::", <2> + "mode" : "sniff" + } +} +---- +// TEST[skip:TODO] +<1> The remote cluster has connected successfully. +<2> If present, indicates the remote cluster has connected using +<> instead of +<>. \ No newline at end of file diff --git a/docs/reference/modules/cluster/remote-clusters-troubleshooting.asciidoc b/docs/reference/modules/cluster/remote-clusters-troubleshooting.asciidoc new file mode 100644 index 0000000000000..5dc6ab8c08c88 --- /dev/null +++ b/docs/reference/modules/cluster/remote-clusters-troubleshooting.asciidoc @@ -0,0 +1,394 @@ +[[remote-clusters-troubleshooting]] +=== Troubleshooting remote clusters + +++++ +Troubleshooting +++++ + +You may encounter several issues when setting up a remote cluster for {ccr} or +{ccs}. + +[[remote-clusters-troubleshooting-general]] +==== General troubleshooting + +[[remote-clusters-troubleshooting-check-connection]] +===== Checking whether a remote cluster has connected successfully + +A successful call to the cluster settings update API for adding or updating +remote clusters does not necessarily mean the configuration is successful. +Use the <> to verify that a local +cluster is successfully connected to a remote cluster. + +include::remote-clusters-remote-info.asciidoc[] + +[[remote-clusters-troubleshooting-enable-server]] +===== Enabling the remote cluster server + +When using API key authentication, cross-cluster traffic happens on the remote +cluster interface, instead of the transport interface. The remote cluster +interface is not enabled by default. This means a node is not ready to accept +incoming cross-cluster requests by default, while it is ready to send outgoing +cross-cluster requests. Ensure you've enabled the remote cluster server on every +node of the remote cluster. In `elasticsearch.yml`: + +* Set <> to +`true`. +* Configure the bind and publish address for remote cluster server traffic, for +example using <>. Without +configuring the address, remote cluster traffic may be bound to the local +interface, and remote clusters running on other machines can't connect. +* Optionally, configure the remote server port using +<> (defaults to `9443`). + +[[remote-clusters-troubleshooting-common-issues]] +==== Common issues + +The following issues are listed in the order they may occur while setting up a +remote cluster. + +[[remote-clusters-not-reachable]] +===== Remote cluster not reachable + +====== Symptom + +A local cluster may not be able to reach a remote cluster for many reasons. For +example, the remote cluster server may not be enabled, an incorrect host or port +may be configured, or a firewall may be blocking traffic. 
When a remote cluster +is not reachable, check the logs of the local cluster for a `connect_exception`. + +When the remote cluster is configured using proxy mode: +[source,txt,subs=+quotes] +---- +[2023-06-28T16:36:47,264][WARN ][o.e.t.ProxyConnectionStrategy] [local-node] failed to open any proxy connections to cluster [my] +org.elasticsearch.transport.ConnectTransportException: [][192.168.0.42:9443] *connect_exception* +---- + +When the remote cluster is configured using sniff mode: +[source,txt,subs=+quotes] +---- +[2023-06-28T16:38:37,731][WARN ][o.e.t.SniffConnectionStrategy] [local-node] fetching nodes from external cluster [my] failed +org.elasticsearch.transport.ConnectTransportException: [][192.168.0.42:9443] *connect_exception* +---- + +====== Resolution + +* Check that the host and port for the remote cluster are correct. +* Ensure the <> on the remote cluster. +* Ensure no firewall is blocking the communication. + +[[remote-clusters-troubleshooting-tls-trust]] +===== TLS trust not established + +TLS can be misconfigured on the local or the remote cluster. The result is that +the local cluster does not trust the certificate presented by the remote +cluster. + +====== Symptom + +The local cluster logs `failed to establish trust with server`: + +[source,txt,subs=+quotes] +---- +[2023-06-29T09:40:55,465][WARN ][o.e.c.s.DiagnosticTrustManager] [local-node] *failed to establish trust with server* at [192.168.0.42]; the server provided a certificate with subject name [CN=remote_cluster], fingerprint [529de35e15666ffaa26afa50876a2a48119db03a], no keyUsage and no extendedKeyUsage; the certificate is valid between [2023-01-29T12:08:37Z] and [2032-08-29T12:08:37Z] (current time is [2023-08-16T23:40:55.464275Z], certificate dates are valid); the session uses cipher suite [TLS_AES_256_GCM_SHA384] and protocol [TLSv1.3]; the certificate has subject alternative names [DNS:localhost,DNS:localhost6.localdomain6,IP:127.0.0.1,IP:0:0:0:0:0:0:0:1,DNS:localhost4,DNS:localhost6,DNS:localhost.localdomain,DNS:localhost4.localdomain4,IP:192.168.0.42]; the certificate is issued by [CN=Elastic Auto RemoteCluster CA] but the server did not provide a copy of the issuing certificate in the certificate chain; this ssl context ([(shared) (with trust configuration: JDK-trusted-certs)]) is not configured to trust that issuer but trusts [97] other issuers +sun.security.validator.ValidatorException: PKIX path building failed: sun.security.provider.certpath.SunCertPathBuilderException: unable to find valid certification path to requested target +---- + +The remote cluster logs `client did not trust this server's certificate`: + +[source,txt,subs=+quotes] +---- +[2023-06-29T09:40:55,478][WARN ][o.e.x.c.s.t.n.SecurityNetty4Transport] [remote-node] *client did not trust this server's certificate*, closing connection Netty4TcpChannel{localAddress=/192.168.0.42:9443, remoteAddress=/192.168.0.84:57305, profile=_remote_cluster} +---- + +====== Resolution + +Read the warn log message on the local cluster carefully to determine the exact +cause of the failure. For example: + +* Is the remote cluster certificate not signed by a trusted CA? This is the most +likely cause. +* Is hostname verification failing? +* Is the certificate expired? + +Once you know the cause, you should be able to fix it by adjusting the remote +cluster related SSL settings on either the local cluster or the remote cluster. + +Often, the issue is on the local cluster. For example, fix it by configuring the +necessary trusted CAs (`xpack.security.remote_cluster_client.ssl.certificate_authorities`), as in +the sketch below.
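+
+A minimal sketch of the corresponding `elasticsearch.yml` entry on the local cluster; the CA
+file path is an assumption for illustration, so point it at the CA that actually signed the
+remote cluster server certificate:
+
+[source,yaml]
+----
+# Trust the CA that signed the remote cluster server certificate (path is illustrative)
+xpack.security.remote_cluster_client.ssl.certificate_authorities: [ "/path/to/remote-cluster-ca.crt" ]
+----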
If you change the `elasticsearch.yml` file, the associated cluster needs to be +restarted for the changes to take effect. + +[[remote-clusters-troubleshooting-api-key]] +==== API key authentication issues + +[[remote-clusters-troubleshooting-transport-port-api-key]] +===== Connecting to transport port when using API key authentication + +When using API key authentication, a local cluster should connect to a remote +cluster's remote cluster server port (defaults to `9443`) instead of the +transport port (defaults to `9300`). A misconfiguration can lead to a number of +symptoms: + +====== Symptom 1 + +It's recommended to use different CAs and certificates for the transport +interface and the remote cluster server interface. If this recommendation is +followed, a remote cluster client node does not trust the server certificate +presented by a remote cluster on the transport interface. + +The local cluster logs `failed to establish trust with server`: + +[source,txt,subs=+quotes] +---- +[2023-06-28T12:48:46,575][WARN ][o.e.c.s.DiagnosticTrustManager] [local-node] *failed to establish trust with server* at [192.168.0.42]; the server provided a certificate with subject name [CN=transport], fingerprint [c43e628be2a8aaaa4092b82d78f2bc206c492322], no keyUsage and no extendedKeyUsage; the certificate is valid between [2023-01-29T12:05:53Z] and [2032-08-29T12:05:53Z] (current time is [2023-06-28T02:48:46.574738Z], certificate dates are valid); the session uses cipher suite [TLS_AES_256_GCM_SHA384] and protocol [TLSv1.3]; the certificate has subject alternative names [DNS:localhost,DNS:localhost6.localdomain6,IP:127.0.0.1,IP:0:0:0:0:0:0:0:1,DNS:localhost4,DNS:localhost6,DNS:localhost.localdomain,DNS:localhost4.localdomain4,IP:192.168.0.42]; the certificate is issued by [CN=Elastic Auto Transport CA] but the server did not provide a copy of the issuing certificate in the certificate chain; this ssl context ([xpack.security.remote_cluster_client.ssl (with trust configuration: PEM-trust{/rcs2/ssl/remote-cluster-ca.crt})]) is not configured to trust that issuer, it only trusts the issuer [CN=Elastic Auto RemoteCluster CA] with fingerprint [ba2350661f66e46c746c1629f0c4b645a2587ff4] +sun.security.validator.ValidatorException: PKIX path building failed: sun.security.provider.certpath.SunCertPathBuilderException: unable to find valid certification path to requested target +---- + +The remote cluster logs `client did not trust this server's certificate`: +[source,txt,subs=+quotes] +---- +[2023-06-28T12:48:46,584][WARN ][o.e.x.c.s.t.n.SecurityNetty4Transport] [remote-node] *client did not trust this server's certificate*, closing connection Netty4TcpChannel{localAddress=/192.168.0.42:9309, remoteAddress=/192.168.0.84:60810, profile=default} +---- + +====== Symptom 2 + +The CA and certificate can be shared between the transport and remote cluster +server interface. Since a remote cluster client does not have a client +certificate by default, the server will fail to verify the client certificate.
+ + +The local cluster logs `Received fatal alert: bad_certificate`: + +[source,txt,subs=+quotes] +---- +[2023-06-28T12:43:30,705][WARN ][o.e.t.TcpTransport ] [local-node] exception caught on transport layer [Netty4TcpChannel{localAddress=/192.168.0.84:60738, remoteAddress=/192.168.0.42:9309, profile=_remote_cluster}], closing connection +io.netty.handler.codec.DecoderException: javax.net.ssl.SSLHandshakeException: *Received fatal alert: bad_certificate* +---- + +The remote cluster logs `Empty client certificate chain`: + +[source,txt,subs=+quotes] +---- +[2023-06-28T12:43:30,772][WARN ][o.e.t.TcpTransport ] [remote-node] exception caught on transport layer [Netty4TcpChannel{localAddress=/192.168.0.42:9309, remoteAddress=/192.168.0.84:60783, profile=default}], closing connection +io.netty.handler.codec.DecoderException: javax.net.ssl.SSLHandshakeException: *Empty client certificate chain* +---- + +====== Symptom 3 + +If the remote cluster client is configured for mTLS and provides a valid client +certificate, the connection fails because the client does not send the expected +authentication header. + +The local cluster logs `missing authentication`: + +[source,txt,subs=+quotes] +---- +[2023-06-28T13:04:52,710][WARN ][o.e.t.ProxyConnectionStrategy] [local-node] failed to open any proxy connections to cluster [my] +org.elasticsearch.transport.RemoteTransportException: [remote-node][192.168.0.42:9309][cluster:internal/remote_cluster/handshake] +Caused by: org.elasticsearch.ElasticsearchSecurityException: *missing authentication* credentials for action [cluster:internal/remote_cluster/handshake] +---- + +This does not show up in the logs of the remote cluster. + +====== Symptom 4 + +If anonymous access is enabled on the remote cluster and it does not require +authentication, depending on the privileges of the anonymous user, the local +cluster may log the following. + +If the anonymous user does not have the necessary privileges to make a +connection, the local cluster logs `unauthorized`: + +[source,txt,subs=+quotes] +---- +org.elasticsearch.transport.RemoteTransportException: [remote-node][192.168.0.42:9309][cluster:internal/remote_cluster/handshake] +Caused by: org.elasticsearch.ElasticsearchSecurityException: action [cluster:internal/remote_cluster/handshake] is *unauthorized* for user [anonymous_foo] with effective roles [reporting_user], this action is granted by the cluster privileges [cross_cluster_search,cross_cluster_replication,manage,all] +---- + +If the anonymous user has the necessary privileges, for example if it is a superuser, +the local cluster logs `requires channel profile to be [_remote_cluster], +but got [default]`: + +[source,txt,subs=+quotes] +---- +[2023-06-28T13:09:52,031][WARN ][o.e.t.ProxyConnectionStrategy] [local-node] failed to open any proxy connections to cluster [my] +org.elasticsearch.transport.RemoteTransportException: [remote-node][192.168.0.42:9309][cluster:internal/remote_cluster/handshake] +Caused by: java.lang.IllegalArgumentException: remote cluster handshake action *requires channel profile to be [_remote_cluster], but got [default]* +---- + +====== Resolution + +Check the port number and ensure you are indeed connecting to the remote cluster +server instead of the transport interface. + +[[remote-clusters-troubleshooting-no-api-key]] +===== Connecting without a cross-cluster API key + +A local cluster uses the presence of a cross-cluster API key to determine the +model with which it connects to a remote cluster.
If a cross-cluster API key is +present, it uses API key based authentication. Otherwise, it uses certificate +based authentication. You can check what model is being used with the <> on the local cluster: + +include::remote-clusters-remote-info.asciidoc[] + +Besides checking the response of the remote cluster info API, you can also check +the logs. + +====== Symptom 1 + +If no cross-cluster API key is used, the local cluster uses the certificate +based authentication method, and connects to the remote cluster using the TLS +configuration of the transport interface. If the remote cluster has different +TLS CA and certificate for transport and remote cluster server interfaces (which +is the recommendation), TLS verification will fail. + +The local cluster logs `failed to establish trust with server`: + +[source,txt,subs=+quotes] +---- +[2023-06-28T12:51:06,452][WARN ][o.e.c.s.DiagnosticTrustManager] [local-node] *failed to establish trust with server* at []; the server provided a certificate with subject name [CN=remote_cluster], fingerprint [529de35e15666ffaa26afa50876a2a48119db03a], no keyUsage and no extendedKeyUsage; the certificate is valid between [2023-01-29T12:08:37Z] and [2032-08-29T12:08:37Z] (current time is [2023-06-28T02:51:06.451581Z], certificate dates are valid); the session uses cipher suite [TLS_AES_256_GCM_SHA384] and protocol [TLSv1.3]; the certificate has subject alternative names [DNS:localhost,DNS:localhost6.localdomain6,IP:127.0.0.1,IP:0:0:0:0:0:0:0:1,DNS:localhost4,DNS:localhost6,DNS:localhost.localdomain,DNS:localhost4.localdomain4,IP:192.168.0.42]; the certificate is issued by [CN=Elastic Auto RemoteCluster CA] but the server did not provide a copy of the issuing certificate in the certificate chain; this ssl context ([xpack.security.transport.ssl (with trust configuration: PEM-trust{/rcs2/ssl/transport-ca.crt})]) is not configured to trust that issuer, it only trusts the issuer [CN=Elastic Auto Transport CA] with fingerprint [bbe49e3f986506008a70ab651b188c70df104812] +sun.security.validator.ValidatorException: PKIX path building failed: sun.security.provider.certpath.SunCertPathBuilderException: unable to find valid certification path to requested target +---- + +The remote cluster logs `client did not trust this server's certificate`: + +[source,txt,subs=+quotes] +---- +[2023-06-28T12:52:16,914][WARN ][o.e.x.c.s.t.n.SecurityNetty4Transport] [remote-node] *client did not trust this server's certificate*, closing connection Netty4TcpChannel{localAddress=/192.168.0.42:9443, remoteAddress=/192.168.0.84:60981, profile=_remote_cluster} +---- + +====== Symptom 2 + +Even if TLS verification is not an issue, the connection fails due to missing +credentials. + +The local cluster logs `Please ensure you have configured remote cluster credentials`: + +[source,txt,subs=+quotes] +---- +Caused by: java.lang.IllegalArgumentException: Cross cluster requests through the dedicated remote cluster server port require transport header [_cross_cluster_access_credentials] but none found. *Please ensure you have configured remote cluster credentials* on the cluster originating the request. +---- + +This does not show up in the logs of the remote cluster. + +====== Resolution + +Add the cross-cluster API key to {es} keystore on every node of the local +cluster. Restart the local cluster to reload the keystore. + +[[remote-clusters-troubleshooting-wrong-api-key-type]] +===== Using the wrong API key type + +API key based authentication requires +<>. It does +not work with <>. 
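+
+For reference, a minimal sketch of creating an API key of the expected `cross_cluster` type on
+the remote cluster (the key name and index pattern are placeholders for illustration):
+
+[source,console]
+----
+POST /_security/cross_cluster/api_key
+{
+  "name": "my-cross-cluster-key",
+  "access": {
+    "search": [ { "names": [ "logs-*" ] } ]
+  }
+}
+----
+// TEST[skip:TODO]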
+ + +====== Symptom + +The local cluster logs `authentication expected API key type of [cross_cluster]`: + +[source,txt,subs=+quotes] +---- +[2023-06-28T13:26:53,962][WARN ][o.e.t.ProxyConnectionStrategy] [local-node] failed to open any proxy connections to cluster [my] +org.elasticsearch.transport.RemoteTransportException: [remote-node][192.168.0.42:9443][cluster:internal/remote_cluster/handshake] +Caused by: org.elasticsearch.ElasticsearchSecurityException: *authentication expected API key type of [cross_cluster]*, but API key [agZXJocBmA2beJfq2yKu] has type [rest] +---- + +This does not show up in the logs of the remote cluster. + +====== Resolution + +Ask the remote cluster administrator to create and distribute a +<>. Replace the +existing API key in the {es} keystore with this cross-cluster API key on every +node of the local cluster. Restart the local cluster for keystore changes to +take effect. + +[[remote-clusters-troubleshooting-non-valid-api-key]] +===== Invalid API key + +A cross-cluster API key can fail to authenticate. For example, its credentials +may be incorrect, or it may have been invalidated or may have expired. + +====== Symptom + +The local cluster logs `unable to authenticate`: + +[source,txt,subs=+quotes] +---- +[2023-06-28T13:22:58,264][WARN ][o.e.t.ProxyConnectionStrategy] [local-node] failed to open any proxy connections to cluster [my] +org.elasticsearch.transport.RemoteTransportException: [remote-node][192.168.0.42:9443][cluster:internal/remote_cluster/handshake] +Caused by: org.elasticsearch.ElasticsearchSecurityException: *unable to authenticate* user [agZXJocBmA2beJfq2yKu] for action [cluster:internal/remote_cluster/handshake] +---- + +The remote cluster logs `Authentication using apikey failed`: + +[source,txt,subs=+quotes] +---- +[2023-06-28T13:24:38,744][WARN ][o.e.x.s.a.ApiKeyAuthenticator] [remote-node] *Authentication using apikey failed* - invalid credentials for API key [agZXJocBmA2beJfq2yKu] +---- + +====== Resolution + +Ask the remote cluster administrator to create and distribute a +<>. Replace the +existing API key in the {es} keystore with this cross-cluster API key on every +node of the local cluster. Restart the local cluster for keystore changes to +take effect. + +[[remote-clusters-troubleshooting-insufficient-privileges]] +===== API key or local user has insufficient privileges + +The effective permission for a local user running requests on a remote cluster +is determined by the intersection of the cross-cluster API key's privileges and +the local user's `remote_indices` privileges. + +====== Symptom + +Request failures due to insufficient privileges result in API responses like: + +[source,js,subs=+quotes] +---- +{ + "type": "security_exception", + "reason": "action [indices:data/read/search] towards remote cluster is *unauthorized for user* [foo] with assigned roles [foo-role] authenticated by API key id [agZXJocBmA2beJfq2yKu] of user [elastic-admin] on indices [cd], this action is granted by the index privileges [read,all]" +} +---- +// NOTCONSOLE + +This does not show up in any logs. + +====== Resolution + +. Check that the local user has the necessary `remote_indices` privileges. Grant sufficient `remote_indices` privileges if necessary. +. If permission is not an issue locally, ask the remote cluster administrator to +create and distribute a +<>. Replace the +existing API key in the {es} keystore with this cross-cluster API key on every +node of the local cluster. Restart the local cluster for keystore changes to +take effect.
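+
+For reference, a minimal sketch of swapping in the new key on one node of the local cluster;
+the alias `my` matches the log examples above, so adjust it to your remote cluster alias:
+
+[source,sh]
+----
+# Remove the old credentials entry for this remote cluster alias, if present
+bin/elasticsearch-keystore remove cluster.remote.my.credentials
+# Add the new cross-cluster API key when prompted
+bin/elasticsearch-keystore add cluster.remote.my.credentials
+----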
+ +[[remote-clusters-troubleshooting-no-remote_indices-privileges]] +===== Local user has no `remote_indices` privileges + +This is a special case of insufficient privileges. In this case, the local user +has no `remote_indices` privileges at all for the target remote cluster. {es} +can detect that and issue a more explicit error response. + +====== Symptom + +This results in API responses like: + +[source,js,subs=+quotes] +---- +{ + "type": "security_exception", + "reason": "action [indices:data/read/search] towards remote cluster [my] is unauthorized for user [foo] with effective roles [] (assigned roles [foo-role] were not found) because *no remote indices privileges apply for the target cluster*" +} +---- +// NOTCONSOLE + +====== Resolution + +Grant sufficient `remote_indices` privileges to the local user. diff --git a/docs/reference/modules/remote-clusters.asciidoc b/docs/reference/modules/remote-clusters.asciidoc index 6d2a3f75d13eb..8a0feefeaf21f 100644 --- a/docs/reference/modules/remote-clusters.asciidoc +++ b/docs/reference/modules/remote-clusters.asciidoc @@ -97,3 +97,5 @@ include::cluster/remote-clusters-cert.asciidoc[] include::cluster/remote-clusters-migration.asciidoc[] include::cluster/remote-clusters-settings.asciidoc[] + +include::cluster/remote-clusters-troubleshooting.asciidoc[] diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index 0ad9bc47c194f..6120a7a5391c4 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -83,7 +83,7 @@ The following checks were performed on each of these signatures: Use Docker commands to start a single-node {es} cluster for development or testing. You can then run additional Docker commands to add nodes to the test -cluster. +cluster or run {kib}. TIP: This setup doesn't run multiple {es} nodes or {kib} by default. To create a multi-node cluster with {kib}, use Docker Compose instead. See @@ -197,6 +197,81 @@ curl --cacert http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200/_c ---- // NOTCONSOLE +[[run-kibana-docker]] +===== Run {kib} + +. Pull the {kib} Docker image. ++ +ifeval::["{release-state}"=="unreleased"] +WARNING: Version {version} of {kib} has not yet been released, so no +Docker image is currently available for this version. +endif::[] ++ +[source,sh,subs="attributes"] +---- +docker pull {kib-docker-image} +---- + +. Optional: Verify the {kib} image's signature. ++ +ifeval::["{release-state}"=="unreleased"] +WARNING: Version {version} of {kib} has not yet been released, so no +Docker image signature is currently available for this version. +endif::[] ++ +[source,sh,subs="attributes"] +---- +wget https://artifacts.elastic.co/cosign.pub +cosign verify --key cosign.pub {kib-docker-image} +---- + +. Start a {kib} container. ++ +[source,sh,subs="attributes"] +---- +docker run --name kib01 --net elastic -p 5601:5601 {kib-docker-image} +---- + +. When {kib} starts, it outputs a unique generated link to the terminal. To +access {kib}, open this link in a web browser. + +. In your browser, enter the enrollment token that was generated when you started {es}. ++ +To regenerate the token, run: ++ +[source,sh] +---- +docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s kibana +---- + +. Log in to {kib} as the `elastic` user with the password that was generated +when you started {es}. 
+ +To regenerate the password, run: + +[source,sh] +---- +docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-reset-password -u elastic +---- + +[[remove-containers-docker]] +===== Remove containers + +To remove the containers and their network, run: + +[source,sh,subs="attributes"] +---- +# Remove the Elastic network +docker network rm elastic + +# Remove {es} containers +docker rm es01 +docker rm es02 + +# Remove the {kib} container +docker rm kib01 +---- + ===== Next steps You now have a test {es} environment set up. Before you start diff --git a/docs/reference/troubleshooting.asciidoc b/docs/reference/troubleshooting.asciidoc index edab1d40dee8c..f263456d039d5 100644 --- a/docs/reference/troubleshooting.asciidoc +++ b/docs/reference/troubleshooting.asciidoc @@ -55,7 +55,8 @@ fix problems that an {es} deployment might encounter. * <> * <> * <> -* <> +* <> +* <> If none of these solutions relate to your issue, you can still get help: diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 385049cdc78a7..ccb7b4dea36ee 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -656,9 +656,9 @@ - - - + + + diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index c4f8915811aee..c6104e92b0b3e 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -322,6 +322,7 @@ public TokenStream create(TokenStream tokenStream) { filters.put("pattern_capture", requiresAnalysisSettings(PatternCaptureGroupTokenFilterFactory::new)); filters.put("pattern_replace", requiresAnalysisSettings(PatternReplaceTokenFilterFactory::new)); filters.put("persian_normalization", PersianNormalizationFilterFactory::new); + filters.put("persian_stem", PersianStemTokenFilterFactory::new); filters.put("porter_stem", PorterStemTokenFilterFactory::new); filters.put( "predicate_token_filter", diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PersianStemTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PersianStemTokenFilterFactory.java new file mode 100644 index 0000000000000..4fcf3fe896fbd --- /dev/null +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PersianStemTokenFilterFactory.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1.
+ */ + +package org.elasticsearch.analysis.common; + +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.fa.PersianStemFilter; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; + +public class PersianStemTokenFilterFactory extends AbstractTokenFilterFactory { + + PersianStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + super(name, settings); + } + + @Override + public TokenStream create(TokenStream tokenStream) { + return new PersianStemFilter(tokenStream); + } +} diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactory.java index 4ef2f837368c9..8f9a882e29d2a 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactory.java @@ -23,6 +23,7 @@ import org.apache.lucene.analysis.en.KStemFilter; import org.apache.lucene.analysis.en.PorterStemFilter; import org.apache.lucene.analysis.es.SpanishLightStemFilter; +import org.apache.lucene.analysis.fa.PersianStemFilter; import org.apache.lucene.analysis.fi.FinnishLightStemFilter; import org.apache.lucene.analysis.fr.FrenchLightStemFilter; import org.apache.lucene.analysis.fr.FrenchMinimalStemFilter; @@ -213,6 +214,10 @@ public TokenStream create(TokenStream tokenStream) { } else if ("minimal_nynorsk".equalsIgnoreCase(language) || "minimalNynorsk".equalsIgnoreCase(language)) { return new NorwegianMinimalStemFilter(tokenStream, NorwegianLightStemmer.NYNORSK); + // Persian stemmers + } else if ("persian".equalsIgnoreCase(language)) { + return new PersianStemFilter(tokenStream); + // Portuguese stemmers } else if ("portuguese".equalsIgnoreCase(language)) { return new SnowballFilter(tokenStream, new PortugueseStemmer()); diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java index 777349ee81c93..f147cb47a2c01 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java @@ -117,6 +117,7 @@ protected Map> getTokenFilters() { filters.put("hindinormalization", HindiNormalizationFilterFactory.class); filters.put("indicnormalization", IndicNormalizationFilterFactory.class); filters.put("persiannormalization", PersianNormalizationFilterFactory.class); + filters.put("persianstem", PersianStemTokenFilterFactory.class); filters.put("scandinaviannormalization", ScandinavianNormalizationFilterFactory.class); filters.put("serbiannormalization", SerbianNormalizationFilterFactory.class); filters.put("soraninormalization", SoraniNormalizationFilterFactory.class); diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/TsdbDataStreamRestIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/TsdbDataStreamRestIT.java index 5ed3bdfbd1d4e..1f27c43f2f2f4 100644 --- 
a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/TsdbDataStreamRestIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/TsdbDataStreamRestIT.java @@ -223,6 +223,83 @@ public void testTsdbDataStreamsNanos() throws Exception { assertTsdbDataStream(); } + public void testTsbdDataStreamComponentTemplateWithAllSettingsAndMappings() throws Exception { + // Different component and index template. All settings and mapping are in component template. + final String COMPONENT_TEMPLATE_WITH_SETTINGS_AND_MAPPINGS = """ + { + "template": { + "settings":{ + "index": { + "mode": "time_series", + "routing_path": ["metricset", "k8s.pod.uid"] + } + }, + "mappings":{ + "properties": { + "@timestamp" : { + "type": "date" + }, + "metricset": { + "type": "keyword", + "time_series_dimension": true + }, + "k8s": { + "properties": { + "pod": { + "properties": { + "uid": { + "type": "keyword", + "time_series_dimension": true + }, + "name": { + "type": "keyword" + }, + "ip": { + "type": "ip" + }, + "network": { + "properties": { + "tx": { + "type": "long" + }, + "rx": { + "type": "long" + } + } + } + } + } + } + } + } + } + } + } + """; + final String DELEGATE_TEMPLATE = """ + { + "index_patterns": ["k8s*"], + "composed_of": ["custom_template"], + "data_stream": { + } + }"""; + + // Delete and add new the templates: + var deleteRequest = new Request("DELETE", "/_index_template/1"); + assertOK(client().performRequest(deleteRequest)); + deleteRequest = new Request("DELETE", "/_component_template/custom_template"); + assertOK(client().performRequest(deleteRequest)); + var request = new Request("POST", "/_component_template/custom_template"); + request.setJsonEntity(COMPONENT_TEMPLATE_WITH_SETTINGS_AND_MAPPINGS); + assertOK(client().performRequest(request)); + request = new Request("POST", "/_index_template/1"); + request.setJsonEntity(DELEGATE_TEMPLATE); + assertOK(client().performRequest(request)); + + // Ensure everything behaves the same, regardless of the fact that all settings and mappings are in component template: + assertTsdbDataStream(); + } + private void assertTsdbDataStream() throws IOException { var bulkRequest = new Request("POST", "/k8s/_bulk"); bulkRequest.setJsonEntity(BULK.replace("$now", formatInstantNanos(Instant.now()))); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index 7a63a1aa4150a..a61d7a6f393cc 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -83,7 +83,6 @@ import static org.elasticsearch.cluster.metadata.IndexMetadata.DownsampleTaskStatus.SUCCESS; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_DOWNSAMPLE_STATUS; import static org.elasticsearch.datastreams.DataStreamsPlugin.LIFECYCLE_CUSTOM_INDEX_METADATA_KEY; -import static org.elasticsearch.datastreams.lifecycle.downsampling.ReplaceSourceWithDownsampleIndexTask.REPLACEMENT_SOURCE_INDEX; /** * This service will implement the needed actions (e.g. rollover, retention) to manage the data streams with a data stream lifecycle @@ -128,7 +127,7 @@ public class DataStreamLifecycleService implements ClusterStateListener, Closeab /* * This is the key for data stream lifecycle related custom index metadata. 
*/ - static final String FORCE_MERGE_COMPLETED_TIMESTAMP_METADATA_KEY = "force_merge_completed_timestamp"; + public static final String FORCE_MERGE_COMPLETED_TIMESTAMP_METADATA_KEY = "force_merge_completed_timestamp"; private final Settings settings; private final Client client; private final ClusterService clusterService; @@ -396,42 +395,24 @@ Set maybeExecuteDownsampling(ClusterState state, DataStream dataStream, L String indexName = index.getName(); IndexMetadata.DownsampleTaskStatus backingIndexDownsamplingStatus = INDEX_DOWNSAMPLE_STATUS.get(backingIndexMeta.getSettings()); - String backingIndexDownsamplingSource = IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME.get(backingIndexMeta.getSettings()); + String downsamplingSourceIndex = IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME.get(backingIndexMeta.getSettings()); // if the current index is not a downsample we want to mark the index as read-only before proceeding with downsampling - if (org.elasticsearch.common.Strings.hasText(backingIndexDownsamplingSource) == false + if (org.elasticsearch.common.Strings.hasText(downsamplingSourceIndex) == false && state.blocks().indexBlocked(ClusterBlockLevel.WRITE, indexName) == false) { affectedIndices.add(index); addIndexBlockOnce(indexName); - } else if (org.elasticsearch.common.Strings.hasText(backingIndexDownsamplingSource) + } else if (org.elasticsearch.common.Strings.hasText(downsamplingSourceIndex) && backingIndexDownsamplingStatus.equals(SUCCESS)) { // if the backing index is a downsample index itself, let's check if its source index still exists as we must delete it - Map lifecycleMetadata = backingIndexMeta.getCustomData(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY); - - // TODO document that we don't handle downsample indices that were added to the data stream manually (because we - // TODO currently can't reliably identify the source index to delete when multiple rounds of donwsampling are - // TODO involved unless DSL stores the needed metadata in the index metadata) - if (lifecycleMetadata != null && lifecycleMetadata.containsKey(REPLACEMENT_SOURCE_INDEX)) { - String actualDownsamplingSource = lifecycleMetadata.get(REPLACEMENT_SOURCE_INDEX); - IndexMetadata downsampleSourceIndex = metadata.index(actualDownsamplingSource); - if (downsampleSourceIndex != null) { - // we mark the backing index as affected as we don't want subsequent operations that might change its state to - // be performed, as we might lose the way to identify that we must delete its replacement source index - affectedIndices.add(index); - // delete downsampling source index (that's not part of the data stream anymore) before doing any more - // downsampling - deleteIndexOnce(backingIndexDownsamplingSource, "replacement with its downsampled index in the data stream"); - } - } else { - logger.trace( - "Data stream lifecycle encountered managed index [{}] as part of data stream [{}] which was " - + "downsampled from source [{} ]. This index was manually downsampled but data stream lifecycle service " - + "only supports downsampled indices through the data stream lifecycle. 
This index will be ignored from " - + "lifecycle donwsampling", - indexName, - dataStream, - backingIndexDownsamplingSource - ); + IndexMetadata downsampleSourceIndex = metadata.index(downsamplingSourceIndex); + if (downsampleSourceIndex != null) { + // we mark the backing index as affected as we don't want subsequent operations that might change its state to + // be performed, as we might lose the way to identify that we must delete its replacement source index + affectedIndices.add(index); + // delete downsampling source index (that's not part of the data stream anymore) before doing any more + // downsampling + deleteIndexOnce(downsamplingSourceIndex, "replacement with its downsampled index in the data stream"); } } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTask.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTask.java index ba9e79962e323..70cf57456e099 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTask.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTask.java @@ -21,7 +21,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexSettings; -import java.util.HashMap; import java.util.Locale; import java.util.Map; import java.util.Objects; @@ -35,7 +34,6 @@ */ public class ReplaceSourceWithDownsampleIndexTask implements ClusterStateTaskListener { private static final Logger LOGGER = LogManager.getLogger(ReplaceSourceWithDownsampleIndexTask.class); - public static final String REPLACEMENT_SOURCE_INDEX = "replacement_source_index"; private ActionListener listener; private final String dataStreamName; private final String sourceBackingIndex; @@ -166,13 +164,11 @@ private static IndexMetadata copyDataStreamLifecycleState( ) { IndexMetadata.Builder downsampleIndexBuilder = IndexMetadata.builder(dest); Map lifecycleCustomMetadata = source.getCustomData(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY); - Map newCustomMetadata = new HashMap<>(); if (lifecycleCustomMetadata != null) { - newCustomMetadata.putAll(lifecycleCustomMetadata); + // this will, for now, ensure that DSL tail merging is skipped for the downsample index (and it should be as the downsample + // transport action forcemerged the downsample index to 1 segment) + downsampleIndexBuilder.putCustom(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY, lifecycleCustomMetadata); } - newCustomMetadata.put(REPLACEMENT_SOURCE_INDEX, source.getIndex().getName()); - downsampleIndexBuilder.putCustom(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY, newCustomMetadata); - if (IndexSettings.LIFECYCLE_ORIGINATION_DATE_SETTING.exists(dest.getSettings()) == false) { downsampleIndexBuilder.settings( Settings.builder() diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java index 1107ecdf5a071..c0cb1e5452c3d 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.Template; 
import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; @@ -36,12 +37,14 @@ import java.time.Instant; import java.time.temporal.ChronoUnit; +import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Set; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_INDEX_UUID; import static org.elasticsearch.datastreams.DataStreamIndexSettingsProvider.FORMATTER; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasItem; @@ -338,6 +341,164 @@ public void testChangingIndexModeFromTimeSeriesToSomethingElseNoEffectOnExisting } } + public void testRolloverClusterStateWithBrokenOlderTsdbDataStream() throws Exception { + Instant now = Instant.now(); + String dataStreamName = "metrics-my-app"; + int numberOfBackingIndices = randomIntBetween(1, 3); + ClusterState clusterState = createClusterState(dataStreamName, numberOfBackingIndices, now, true); + DataStream dataStream = clusterState.metadata().dataStreams().get(dataStreamName); + + ThreadPool testThreadPool = new TestThreadPool(getTestName()); + try { + MetadataRolloverService rolloverService = DataStreamTestHelper.getMetadataRolloverService( + dataStream, + testThreadPool, + Set.of(createSettingsProvider(xContentRegistry())), + xContentRegistry() + ); + MaxDocsCondition condition = new MaxDocsCondition(randomNonNegativeLong()); + List> metConditions = Collections.singletonList(condition); + CreateIndexRequest createIndexRequest = new CreateIndexRequest("_na_"); + IndexMetadataStats indexStats = new IndexMetadataStats(IndexWriteLoad.builder(1).build(), 10, 10); + + long before = testThreadPool.absoluteTimeInMillis(); + MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState( + clusterState, + dataStream.getName(), + null, + createIndexRequest, + metConditions, + now, + randomBoolean(), + false, + indexStats + ); + long after = testThreadPool.absoluteTimeInMillis(); + + String sourceIndexName = DataStream.getDefaultBackingIndexName(dataStream.getName(), dataStream.getGeneration()); + String newIndexName = DataStream.getDefaultBackingIndexName(dataStream.getName(), dataStream.getGeneration() + 1); + assertEquals(sourceIndexName, rolloverResult.sourceIndexName()); + assertEquals(newIndexName, rolloverResult.rolloverIndexName()); + Metadata rolloverMetadata = rolloverResult.clusterState().metadata(); + assertEquals(dataStream.getIndices().size() + 1, rolloverMetadata.indices().size()); + IndexMetadata rolloverIndexMetadata = rolloverMetadata.index(newIndexName); + + IndexAbstraction ds = rolloverMetadata.getIndicesLookup().get(dataStream.getName()); + assertThat(ds.getType(), equalTo(IndexAbstraction.Type.DATA_STREAM)); + assertThat(ds.getIndices(), hasSize(dataStream.getIndices().size() + 1)); + assertThat(ds.getIndices(), hasItem(rolloverMetadata.index(sourceIndexName).getIndex())); + assertThat(ds.getIndices(), hasItem(rolloverIndexMetadata.getIndex())); + assertThat(ds.getWriteIndex(), equalTo(rolloverIndexMetadata.getIndex())); + assertThat(((DataStream) ds).getIndexMode(), equalTo(IndexMode.TIME_SERIES)); + + RolloverInfo info = rolloverMetadata.index(sourceIndexName).getRolloverInfos().get(dataStream.getName()); + assertThat(info.getTime(), 
lessThanOrEqualTo(after)); + assertThat(info.getTime(), greaterThanOrEqualTo(before)); + assertThat(info.getMetConditions(), hasSize(1)); + assertThat(info.getMetConditions().get(0).value(), equalTo(condition.value())); + + for (int i = 0; i < numberOfBackingIndices; i++) { + var im = rolloverMetadata.index(rolloverMetadata.dataStreams().get(dataStreamName).getIndices().get(i)); + var startTime1 = IndexSettings.TIME_SERIES_START_TIME.get(im.getSettings()); + var endTime1 = IndexSettings.TIME_SERIES_END_TIME.get(im.getSettings()); + assertThat(startTime1.toEpochMilli(), equalTo(DateUtils.MAX_MILLIS_BEFORE_MINUS_9999)); + assertThat(endTime1.toEpochMilli(), equalTo(DateUtils.MAX_MILLIS_BEFORE_9999)); + assertThat(im.getIndexMode(), equalTo(null)); + } + { + var im = rolloverMetadata.index( + rolloverMetadata.dataStreams().get(dataStreamName).getIndices().get(numberOfBackingIndices) + ); + var lastStartTime = IndexSettings.TIME_SERIES_START_TIME.get(im.getSettings()); + var lastEndTime = IndexSettings.TIME_SERIES_END_TIME.get(im.getSettings()); + assertThat(lastStartTime, equalTo(now.minus(2, ChronoUnit.HOURS).truncatedTo(ChronoUnit.SECONDS))); + assertThat(lastEndTime, equalTo(now.plus(2, ChronoUnit.HOURS).truncatedTo(ChronoUnit.SECONDS))); + assertThat(im.getIndexMode(), equalTo(IndexMode.TIME_SERIES)); + } + } finally { + testThreadPool.shutdown(); + } + } + + public void testRolloverClusterStateWithBrokenTsdbDataStream() throws Exception { + Instant now = Instant.now(); + String dataStreamName = "metrics-my-app"; + int numberOfBackingIndices = randomIntBetween(1, 3); + ClusterState clusterState = createClusterState(dataStreamName, numberOfBackingIndices, now, false); + DataStream dataStream = clusterState.metadata().dataStreams().get(dataStreamName); + + ThreadPool testThreadPool = new TestThreadPool(getTestName()); + try { + MetadataRolloverService rolloverService = DataStreamTestHelper.getMetadataRolloverService( + dataStream, + testThreadPool, + Set.of(createSettingsProvider(xContentRegistry())), + xContentRegistry() + ); + MaxDocsCondition condition = new MaxDocsCondition(randomNonNegativeLong()); + List<Condition<?>> metConditions = Collections.singletonList(condition); + CreateIndexRequest createIndexRequest = new CreateIndexRequest("_na_"); + IndexMetadataStats indexStats = new IndexMetadataStats(IndexWriteLoad.builder(1).build(), 10, 10); + + Exception e = expectThrows( + IllegalArgumentException.class, + () -> rolloverService.rolloverClusterState( + clusterState, + dataStream.getName(), + null, + createIndexRequest, + metConditions, + now, + randomBoolean(), + false, + indexStats + ) + ); + assertThat(e.getMessage(), containsString("is overlapping with backing index")); + } finally { + testThreadPool.shutdown(); + } + } + + private static ClusterState createClusterState(String dataStreamName, int numberOfBackingIndices, Instant now, boolean includeVersion) { + List<Index> backingIndices = new ArrayList<>(numberOfBackingIndices); + for (int i = 1; i <= numberOfBackingIndices; i++) { + backingIndices.add(new Index(DataStream.getDefaultBackingIndexName(dataStreamName, i, now.toEpochMilli()), "uuid" + i)); + } + final DataStream dataStream = new DataStream( + dataStreamName, + backingIndices, + numberOfBackingIndices, + null, + false, + false, + false, + false, + null + ); + ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of(dataStream.getName() + "*")) + .template( + new Template(Settings.builder().put("index.mode", "time_series").put("index.routing_path",
"uid").build(), null, null) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build(); + Metadata.Builder builder = Metadata.builder(); + builder.put("template", template); + + for (Index backingIndex : backingIndices) { + var settings = settings(IndexVersion.current()).put("index.hidden", true) + .put(SETTING_INDEX_UUID, backingIndex.getUUID()) + .put("index.mode", "time_series") + .put("index.routing_path", "uid"); + if (includeVersion) { + settings.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.V_8_9_0); + } + builder.put(IndexMetadata.builder(backingIndex.getName()).settings(settings).numberOfShards(1).numberOfReplicas(0)); + } + builder.put(dataStream); + return ClusterState.builder(new ClusterName("test")).metadata(builder).build(); + } + static DataStreamIndexSettingsProvider createSettingsProvider(NamedXContentRegistry xContentRegistry) { return new DataStreamIndexSettingsProvider( im -> MapperTestUtils.newMapperService(xContentRegistry, createTempDir(), im.getSettings(), im.getIndex().getName()) diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTaskTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTaskTests.java index f40abbd1f1573..c3d1262c72dce 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTaskTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTaskTests.java @@ -24,10 +24,11 @@ import org.junit.Before; import java.util.Locale; +import java.util.Map; import static org.elasticsearch.datastreams.DataStreamsPlugin.LIFECYCLE_CUSTOM_INDEX_METADATA_KEY; import static org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleFixtures.createDataStream; -import static org.elasticsearch.datastreams.lifecycle.downsampling.ReplaceSourceWithDownsampleIndexTask.REPLACEMENT_SOURCE_INDEX; +import static org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService.FORCE_MERGE_COMPLETED_TIMESTAMP_METADATA_KEY; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -159,6 +160,12 @@ public void testSourceIsReplacedWithDownsampleAndOriginationDateIsConfigured() { builder.put(dataStream); ClusterState previousState = ClusterState.builder(ClusterName.DEFAULT).metadata(builder).build(); + // let's add some lifecycle custom metadata to the first generation index + IndexMetadata indexMetadata = previousState.metadata().index(firstGenIndex); + IndexMetadata.Builder firstGenBuilder = IndexMetadata.builder(indexMetadata) + .putCustom(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY, Map.of(FORCE_MERGE_COMPLETED_TIMESTAMP_METADATA_KEY, String.valueOf(now))); + Metadata.Builder metaBuilder = Metadata.builder(previousState.metadata()).put(firstGenBuilder); + previousState = ClusterState.builder(previousState).metadata(metaBuilder).build(); ClusterState newState = new ReplaceSourceWithDownsampleIndexTask(dataStreamName, firstGenIndex, downsampleIndex, null).execute( previousState ); @@ -181,14 +188,57 @@ public void testSourceIsReplacedWithDownsampleAndOriginationDateIsConfigured() { IndexMetadata downsampleMeta = newState.metadata().index(downsampleIndex); assertThat(IndexSettings.LIFECYCLE_ORIGINATION_DATE_SETTING.get(downsampleMeta.getSettings()), 
is(rolloverInfo.getTime())); - // the donwsample index contains metadata to remember the index we downsampled from - assertThat(downsampleMeta.getCustomData(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY), is(notNullValue())); + assertThat(downsampleMeta.getCustomData(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY), notNullValue()); assertThat( - downsampleMeta.getCustomData(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY).get(REPLACEMENT_SOURCE_INDEX), - is(sourceIndexAbstraction.getName()) + downsampleMeta.getCustomData(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY).get(FORCE_MERGE_COMPLETED_TIMESTAMP_METADATA_KEY), + is(String.valueOf(now)) ); } + public void testSourceWithoutLifecycleMetaAndDestWithOriginationDateAlreadyConfigured() { + String dataStreamName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + int numBackingIndices = 3; + Metadata.Builder builder = Metadata.builder(); + DataStream dataStream = createDataStream( + builder, + dataStreamName, + numBackingIndices, + settings(IndexVersion.current()), + DataStreamLifecycle.newBuilder().dataRetention(TimeValue.MAX_VALUE).build(), + now + ); + String firstGenIndex = DataStream.getDefaultBackingIndexName(dataStreamName, 1); + String downsampleIndex = "downsample-1s-" + firstGenIndex; + long downsampleOriginationDate = now - randomLongBetween(10_000, 12_000); + IndexMetadata.Builder downsampleIndexMeta = IndexMetadata.builder(downsampleIndex) + .settings( + settings(IndexVersion.current()).put(IndexSettings.LIFECYCLE_ORIGINATION_DATE_SETTING.getKey(), downsampleOriginationDate) + ) + .numberOfShards(1) + .numberOfReplicas(0); + builder.put(downsampleIndexMeta); + builder.put(dataStream); + ClusterState previousState = ClusterState.builder(ClusterName.DEFAULT).metadata(builder).build(); + + ClusterState newState = new ReplaceSourceWithDownsampleIndexTask(dataStreamName, firstGenIndex, downsampleIndex, null).execute( + previousState + ); + + IndexAbstraction downsampleIndexAbstraction = newState.metadata().getIndicesLookup().get(downsampleIndex); + assertThat(downsampleIndexAbstraction, is(notNullValue())); + assertThat(downsampleIndexAbstraction.getParentDataStream(), is(notNullValue())); + // the downsample index is part of the data stream + assertThat(downsampleIndexAbstraction.getParentDataStream().getName(), is(dataStreamName)); + + // the source index is NOT part of the data stream + IndexAbstraction sourceIndexAbstraction = newState.metadata().getIndicesLookup().get(firstGenIndex); + assertThat(sourceIndexAbstraction, is(notNullValue())); + assertThat(sourceIndexAbstraction.getParentDataStream(), is(nullValue())); + + IndexMetadata downsampleMeta = newState.metadata().index(downsampleIndex); + assertThat(IndexSettings.LIFECYCLE_ORIGINATION_DATE_SETTING.get(downsampleMeta.getSettings()), is(downsampleOriginationDate)); + } + public void testSourceIndexIsNotPartOfDSAnymore() { String dataStreamName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); int numBackingIndices = 3; @@ -227,12 +277,6 @@ public void testSourceIndexIsNotPartOfDSAnymore() { IndexMetadata firstGenMeta = newState.metadata().index(firstGenIndex); RolloverInfo rolloverInfo = firstGenMeta.getRolloverInfos().get(dataStreamName); assertThat(rolloverInfo, is(notNullValue())); - - IndexMetadata downsampleMeta = newState.metadata().index(downsampleIndex); - assertThat(IndexSettings.LIFECYCLE_ORIGINATION_DATE_SETTING.get(downsampleMeta.getSettings()), is(rolloverInfo.getTime())); - // the donwsample index contains metadata to remember the index we downsampled from - 
assertThat(downsampleMeta.getCustomData(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY), is(notNullValue())); - assertThat(downsampleMeta.getCustomData(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY).get(REPLACEMENT_SOURCE_INDEX), is(firstGenIndex)); } public void testListenersIsNonConsideredInEquals() { diff --git a/modules/rest-root/src/main/java/org/elasticsearch/rest/root/MainResponse.java b/modules/rest-root/src/main/java/org/elasticsearch/rest/root/MainResponse.java index 4ce93563ceb22..af35e2de01f3a 100644 --- a/modules/rest-root/src/main/java/org/elasticsearch/rest/root/MainResponse.java +++ b/modules/rest-root/src/main/java/org/elasticsearch/rest/root/MainResponse.java @@ -54,7 +54,7 @@ public class MainResponse extends ActionResponse implements ToXContentObject { if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_031)) { wireLuceneVersion = IndexVersion.readVersion(in).luceneVersion().toString(); } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_019)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { TransportVersion.readVersion(in); } } @@ -114,7 +114,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_031)) { IndexVersion.writeVersion(IndexVersion.current(), out); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_019)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { TransportVersion.writeVersion(TransportVersion.current(), out); } } diff --git a/plugins/examples/settings.gradle b/plugins/examples/settings.gradle index ea6ecdb153f5b..3b6280dc3bcbd 100644 --- a/plugins/examples/settings.gradle +++ b/plugins/examples/settings.gradle @@ -7,7 +7,7 @@ */ plugins { - id "com.gradle.enterprise" version "3.13.1" + id "com.gradle.enterprise" version "3.14.1" } // Include all subdirectories as example projects diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/CertGenCliTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/CertGenCliTests.java index 74749d57630c7..03b820de58668 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/CertGenCliTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/CertGenCliTests.java @@ -47,7 +47,6 @@ public static void cleanupFiles() { FileUtils.rm(instancesFile, certificatesFile); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99153") public void test10Install() throws Exception { install(); // Disable security auto-configuration as we want to generate keys/certificates manually here diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java index 05a55e64ffcf3..008a718be5873 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java @@ -54,6 +54,11 @@ protected final boolean preserveIndicesUponCompletion() { return true; } + @Override + protected final boolean preserveDataStreamsUponCompletion() { + return true; + } + @Override protected final boolean preserveReposUponCompletion() { return true; diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TsdbIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TsdbIT.java new file mode 100644 index 0000000000000..19f24c97a47f8 --- /dev/null +++ 
b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TsdbIT.java @@ -0,0 +1,305 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.upgrades; + +import org.elasticsearch.Version; +import org.elasticsearch.client.Request; +import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.time.FormatNames; +import org.elasticsearch.test.rest.ObjectPath; + +import java.io.IOException; +import java.time.Instant; +import java.util.Map; + +import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.backingIndexEqualTo; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class TsdbIT extends AbstractRollingTestCase { + + private static final String TEMPLATE = """ + { + "settings":{ + "index": { + "mode": "time_series" + } + }, + "mappings":{ + "dynamic_templates": [ + { + "labels": { + "path_match": "pod.labels.*", + "mapping": { + "type": "keyword", + "time_series_dimension": true + } + } + } + ], + "properties": { + "@timestamp" : { + "type": "date" + }, + "metricset": { + "type": "keyword", + "time_series_dimension": true + }, + "k8s": { + "properties": { + "pod": { + "properties": { + "uid": { + "type": "keyword", + "time_series_dimension": true + }, + "name": { + "type": "keyword" + }, + "ip": { + "type": "ip" + }, + "network": { + "properties": { + "tx": { + "type": "long" + }, + "rx": { + "type": "long" + } + } + } + } + } + } + } + } + } + } + """; + private static final String BULK = + """ + {"create": {}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507","ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}} + {"create": {}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "hamster", "uid":"947e4ced-1786-4e53-9e0c-5c447e959508","ip": "10.10.55.1", "network": {"tx": 2005177954, "rx": 801479970}}}} + {"create": {}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "cow", "uid":"947e4ced-1786-4e53-9e0c-5c447e959509","ip": "10.10.55.1", "network": {"tx": 2006223737, "rx": 802337279}}}} + {"create": {}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "rat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959510","ip": "10.10.55.2", "network": {"tx": 2012916202, "rx": 803685721}}}} + {"create": {}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9","ip": "10.10.55.3", "network": {"tx": 1434521831, "rx": 530575198}}}} + {"create": {}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "tiger", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea10","ip": "10.10.55.3", "network": {"tx": 1434577921, "rx": 530600088}}}} + {"create": {}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "lion", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876e11","ip": "10.10.55.3", "network": {"tx": 1434587694, "rx": 530604797}}}} + {"create": {}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "elephant", 
"uid":"df3145b3-0563-4d3b-a0f7-897eb2876eb4","ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}} + """; + + private static final String DOC = """ + { + "@timestamp": "$time", + "metricset": "pod", + "k8s": { + "pod": { + "name": "dog", + "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", + "ip": "10.10.55.3", + "network": { + "tx": 1434595272, + "rx": 530605511 + } + } + } + } + """; + + public void testTsdbDataStream() throws Exception { + assumeTrue( + "Skipping version [" + UPGRADE_FROM_VERSION + "], because TSDB was GA-ed in 8.7.0", + UPGRADE_FROM_VERSION.onOrAfter(Version.V_8_7_0) + ); + String dataStreamName = "k8s"; + if (CLUSTER_TYPE == ClusterType.OLD) { + final String INDEX_TEMPLATE = """ + { + "index_patterns": ["$PATTERN"], + "template": $TEMPLATE, + "data_stream": { + } + }"""; + // Add composable index template + String templateName = "1"; + var putIndexTemplateRequest = new Request("POST", "/_index_template/" + templateName); + putIndexTemplateRequest.setJsonEntity(INDEX_TEMPLATE.replace("$TEMPLATE", TEMPLATE).replace("$PATTERN", dataStreamName)); + assertOK(client().performRequest(putIndexTemplateRequest)); + + performOldClustertOperations(templateName, dataStreamName); + } else if (CLUSTER_TYPE == ClusterType.MIXED) { + performMixedClusterOperations(dataStreamName); + } else if (CLUSTER_TYPE == ClusterType.UPGRADED) { + performUpgradedClusterOperations(dataStreamName); + } + } + + public void testTsdbDataStreamWithComponentTemplate() throws Exception { + assumeTrue( + "Skipping version [" + UPGRADE_FROM_VERSION + "], because TSDB was GA-ed in 8.7.0 and bug was fixed in 8.11.0", + UPGRADE_FROM_VERSION.onOrAfter(Version.V_8_7_0) && UPGRADE_FROM_VERSION.before(Version.V_8_11_0) + ); + String dataStreamName = "template-with-component-template"; + if (CLUSTER_TYPE == ClusterType.OLD) { + final String COMPONENT_TEMPLATE = """ + { + "template": $TEMPLATE + } + """; + var putComponentTemplate = new Request("POST", "/_component_template/1"); + String template = TEMPLATE.replace("\"time_series\"", "\"time_series\", \"routing_path\": [\"k8s.pod.uid\"]"); + putComponentTemplate.setJsonEntity(COMPONENT_TEMPLATE.replace("$TEMPLATE", template)); + assertOK(client().performRequest(putComponentTemplate)); + final String INDEX_TEMPLATE = """ + { + "index_patterns": ["$PATTERN"], + "composed_of": ["1"], + "data_stream": { + } + }"""; + // Add composable index template + String templateName = "2"; + var putIndexTemplateRequest = new Request("POST", "/_index_template/" + templateName); + putIndexTemplateRequest.setJsonEntity(INDEX_TEMPLATE.replace("$PATTERN", dataStreamName)); + assertOK(client().performRequest(putIndexTemplateRequest)); + + performOldClustertOperations(templateName, dataStreamName); + } else if (CLUSTER_TYPE == ClusterType.MIXED) { + performMixedClusterOperations(dataStreamName); + } else if (CLUSTER_TYPE == ClusterType.UPGRADED) { + performUpgradedClusterOperations(dataStreamName); + + var dataStreams = getDataStream(dataStreamName); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.name"), equalTo(dataStreamName)); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.generation"), equalTo(2)); + String firstBackingIndex = ObjectPath.evaluate(dataStreams, "data_streams.0.indices.0.index_name"); + { + var indices = getIndex(firstBackingIndex); + var escapedBackingIndex = firstBackingIndex.replace(".", "\\."); + assertThat(ObjectPath.evaluate(indices, escapedBackingIndex + ".data_stream"), equalTo(dataStreamName)); + 
assertThat(ObjectPath.evaluate(indices, escapedBackingIndex + ".settings.index.mode"), nullValue()); + String startTime = ObjectPath.evaluate(indices, escapedBackingIndex + ".settings.index.time_series.start_time"); + assertThat(startTime, nullValue()); + String endTime = ObjectPath.evaluate(indices, escapedBackingIndex + ".settings.index.time_series.end_time"); + assertThat(endTime, nullValue()); + } + String secondBackingIndex = ObjectPath.evaluate(dataStreams, "data_streams.0.indices.1.index_name"); + { + var indices = getIndex(secondBackingIndex); + var escapedBackingIndex = secondBackingIndex.replace(".", "\\."); + assertThat(ObjectPath.evaluate(indices, escapedBackingIndex + ".data_stream"), equalTo(dataStreamName)); + assertThat(ObjectPath.evaluate(indices, escapedBackingIndex + ".settings.index.mode"), equalTo("time_series")); + String startTime = ObjectPath.evaluate(indices, escapedBackingIndex + ".settings.index.time_series.start_time"); + assertThat(startTime, notNullValue()); + String endTime = ObjectPath.evaluate(indices, escapedBackingIndex + ".settings.index.time_series.end_time"); + assertThat(endTime, notNullValue()); + } + } + } + + private void performUpgradedClusterOperations(String dataStreamName) throws Exception { + ensureGreen(dataStreamName); + var rolloverRequest = new Request("POST", "/" + dataStreamName + "/_rollover"); + assertOK(client().performRequest(rolloverRequest)); + + var dataStreams = getDataStream(dataStreamName); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.name"), equalTo(dataStreamName)); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.generation"), equalTo(2)); + String firstBackingIndex = ObjectPath.evaluate(dataStreams, "data_streams.0.indices.0.index_name"); + String secondBackingIndex = ObjectPath.evaluate(dataStreams, "data_streams.0.indices.1.index_name"); + assertThat(secondBackingIndex, backingIndexEqualTo(dataStreamName, 2)); + indexDoc(dataStreamName); + assertSearch(dataStreamName, 10); + closeIndex(firstBackingIndex); + closeIndex(secondBackingIndex); + openIndex(firstBackingIndex); + openIndex(secondBackingIndex); + assertBusy(() -> { + try { + assertSearch(dataStreamName, 10); + } catch (Exception e) { + throw new AssertionError(e); + } + }); + } + + private static void performMixedClusterOperations(String dataStreamName) throws IOException { + ensureHealth(dataStreamName, request -> request.addParameter("wait_for_status", "yellow")); + if (FIRST_MIXED_ROUND) { + indexDoc(dataStreamName); + } + assertSearch(dataStreamName, 9); + } + + private static void performOldClusterOperations(String templateName, String dataStreamName) throws IOException { + var bulkRequest = new Request("POST", "/" + dataStreamName + "/_bulk"); + bulkRequest.setJsonEntity(BULK.replace("$now", formatInstant(Instant.now()))); + bulkRequest.addParameter("refresh", "true"); + var response = client().performRequest(bulkRequest); + assertOK(response); + var responseBody = entityAsMap(response); + assertThat("errors in response:\n " + responseBody, responseBody.get("errors"), equalTo(false)); + + var dataStreams = getDataStream(dataStreamName); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams"), hasSize(1)); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.name"), equalTo(dataStreamName)); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.generation"), equalTo(1)); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.template"), equalTo(templateName)); +
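// the data stream created in the old cluster should have exactly one generation-1 backing index (asserted next) +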
assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.indices"), hasSize(1)); + String firstBackingIndex = ObjectPath.evaluate(dataStreams, "data_streams.0.indices.0.index_name"); + assertThat(firstBackingIndex, backingIndexEqualTo(dataStreamName, 1)); + assertSearch(dataStreamName, 8); + } + + private static void indexDoc(String dataStreamName) throws IOException { + var indexRequest = new Request("POST", "/" + dataStreamName + "/_doc"); + indexRequest.addParameter("refresh", "true"); + indexRequest.setJsonEntity(DOC.replace("$time", formatInstant(Instant.now()))); + var response = client().performRequest(indexRequest); + assertOK(response); + } + + private static void assertSearch(String dataStreamName, int expectedHitCount) throws IOException { + var searchRequest = new Request("GET", dataStreamName + "/_search"); + var response = client().performRequest(searchRequest); + assertOK(response); + var responseBody = entityAsMap(response); + assertThat(ObjectPath.evaluate(responseBody, "hits.total.value"), equalTo(expectedHitCount)); + } + + private static String formatInstant(Instant instant) { + return DateFormatter.forPattern(FormatNames.STRICT_DATE_OPTIONAL_TIME.getName()).format(instant); + } + + private static Map getDataStream(String dataStreamName) throws IOException { + var getDataStreamsRequest = new Request("GET", "/_data_stream/" + dataStreamName); + var response = client().performRequest(getDataStreamsRequest); + assertOK(response); + return entityAsMap(response); + } + + private static Map getIndex(String indexName) throws IOException { + var getIndexRequest = new Request("GET", "/" + indexName + "?human"); + var response = client().performRequest(getIndexRequest); + assertOK(response); + return entityAsMap(response); + } + +} diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java index 068747d5a4824..0f829f20fe3c4 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java @@ -40,6 +40,11 @@ protected boolean preserveTemplatesUponCompletion() { return true; } + @Override + protected boolean preserveDataStreamsUponCompletion() { + return true; + } + public UpgradeClusterClientYamlTestSuiteIT(ClientYamlTestCandidate testCandidate) { super(testCandidate); } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml new file mode 100644 index 0000000000000..8e88111ad45be --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml @@ -0,0 +1,143 @@ +--- +"Indexing and searching sparse vectors": + + - skip: + version: " - 8.10.99" + reason: "sparse_vector field type reintroduced in 8.11" + + - do: + indices.create: + index: test + body: + settings: + number_of_replicas: 0 + mappings: + properties: + text: + type: text + ml.tokens: + type: sparse_vector + + - match: { acknowledged: true } + + - do: + index: + index: test + id: "1" + body: + text: "running is good for you" + ml: + tokens: + running: 2.4097164 + good: 2.170997 + run: 2.052153 + race: 1.4575411 + for: 1.1908325 + runner: 1.1803857 + exercise: 1.1652642 + you: 0.9654308 + training: 0.94999343 + 
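# each entry under ml.tokens is a feature term paired with its weight; the search below scores documents against these fields with boosted term queries +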
sports: 0.93650943 + fitness: 0.83129317 + best: 0.820365 + bad: 0.7385934 + health: 0.7098149 + marathon: 0.61555296 + gym: 0.5652374 + + - match: { result: "created" } + + - do: + index: + index: test + id: "2" + body: + text: "walking is a healthy exercise" + ml: + tokens: + walking: 2.4797723 + exercise: 2.074234 + healthy: 1.971596 + walk: 1.6458614 + health: 1.5291847 + walker: 1.4736869 + activity: 1.0793462 + good: 1.0597849 + fitness: 0.91855437 + training: 0.86342937 + movement: 0.7657065 + normal: 0.6694081 + foot: 0.5892523 + physical: 0.4926789 + + - match: { result: "created" } + + - do: + indices.refresh: { } + + - do: + search: + index: test + body: + query: + bool: + should: + - term: + ml.tokens: + value: "walk" + boost: 1.9790847 + - term: + ml.tokens: + value: "walking" + boost: 1.7092685 + - term: + ml.tokens: + value: "exercise" + boost: 0.84076905 + + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "2" } + - match: { hits.hits.1._id: "1" } + +--- +"Sparse vector in 7.x": + - skip: + features: allowed_warnings + version: "8.0.0 - " + reason: "sparse_vector field type supported in 7.x" + - do: + allowed_warnings: + - "The [sparse_vector] field type is deprecated and will be removed in 8.0." + - "[sparse_vector] field type in old 7.x indices is allowed to contain [sparse_vector] fields, but they cannot be indexed or searched." + indices.create: + index: test + body: + settings: + number_of_replicas: 0 + mappings: + properties: + text: + type: text + ml.tokens: + type: sparse_vector + + - match: { acknowledged: true } + +--- +"Sparse vector in 8.x": + - skip: + version: " - 7.99.99, 8.11.0 - " + reason: "sparse_vector field type not supported in 8.x until 8.11.0" + - do: + catch: /The \[sparse_vector\] field type is no longer supported/ + indices.create: + index: test + body: + settings: + number_of_replicas: 0 + mappings: + properties: + text: + type: text + ml.tokens: + type: sparse_vector diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java index 8f8d982dd14c1..f561cc50b4f19 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.cluster; -import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; @@ -31,6 +30,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -227,7 +227,7 @@ private DiscoveryNode randomNode(String nodeId) { */ private ClusterState.Builder randomNodes(ClusterState clusterState) { DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()); - Map transports = new HashMap<>(clusterState.transportVersions()); + Map versions = new HashMap<>(clusterState.compatibilityVersions()); List nodeIds = randomSubsetOf( randomInt(clusterState.nodes().getNodes().size() - 1), clusterState.nodes().getNodes().keySet().toArray(new String[0]) @@ -235,10 +235,10 @@ private 
ClusterState.Builder randomNodes(ClusterState clusterState) { for (String nodeId : nodeIds) { if (nodeId.startsWith("node-")) { nodes.remove(nodeId); - transports.remove(nodeId); + versions.remove(nodeId); if (randomBoolean()) { nodes.add(randomNode(nodeId)); - transports.put(nodeId, TransportVersionUtils.randomVersion(random())); + versions.put(nodeId, new CompatibilityVersions(TransportVersionUtils.randomVersion(random()))); } } } @@ -246,10 +246,10 @@ private ClusterState.Builder randomNodes(ClusterState clusterState) { for (int i = 0; i < additionalNodeCount; i++) { String id = "node-" + randomAlphaOfLength(10); nodes.add(randomNode(id)); - transports.put(id, TransportVersionUtils.randomVersion(random())); + versions.put(id, new CompatibilityVersions(TransportVersionUtils.randomVersion(random()))); } - return ClusterState.builder(clusterState).nodes(nodes).transportVersions(transports); + return ClusterState.builder(clusterState).nodes(nodes).compatibilityVersions(versions); } /** diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java index 8f2341ecded95..db5578ee6e60b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.index.IndexService; @@ -27,7 +28,7 @@ import java.util.Optional; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.lessThanOrEqualTo; public class IndexingMemoryControllerIT extends ESSingleNodeTestCase { @@ -104,7 +105,6 @@ public void testDeletesAloneCanTriggerRefresh() throws Exception { for (int i = 0; i < 100; i++) { client().prepareDelete("index", Integer.toString(i)).get(); } - // need to assert busily as IndexingMemoryController refreshes in background - assertBusy(() -> assertThat(shard.refreshStats().getTotal(), greaterThan(refreshStats.getTotal() + 1))); + assertThat(shard.getEngineOrNull().getIndexBufferRAMBytesUsed(), lessThanOrEqualTo(ByteSizeUnit.KB.toBytes(1))); } } diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index 2d4337358b32e..1395aae41e2af 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -170,6 +170,7 @@ exports org.elasticsearch.cluster.routing.allocation.command; exports org.elasticsearch.cluster.routing.allocation.decider; exports org.elasticsearch.cluster.service; + exports org.elasticsearch.cluster.version; exports org.elasticsearch.common; exports org.elasticsearch.common.blobstore; exports org.elasticsearch.common.blobstore.fs; diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 37ce651f1a311..f9d8b694709b6 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1838,13 +1838,13 @@ private enum 
ElasticsearchExceptionHandle { org.elasticsearch.http.HttpHeadersValidationException.class, org.elasticsearch.http.HttpHeadersValidationException::new, 169, - TransportVersion.V_8_500_010 + TransportVersion.V_8_500_020 ), ROLE_RESTRICTION_EXCEPTION( ElasticsearchRoleRestrictionException.class, ElasticsearchRoleRestrictionException::new, 170, - TransportVersion.V_8_500_016 + TransportVersion.V_8_500_020 ), API_NOT_AVAILABLE_EXCEPTION(ApiNotAvailableException.class, ApiNotAvailableException::new, 171, TransportVersion.V_8_500_065); diff --git a/server/src/main/java/org/elasticsearch/TransportVersion.java b/server/src/main/java/org/elasticsearch/TransportVersion.java index 9d6285555aa59..3d3024b36ec9c 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersion.java +++ b/server/src/main/java/org/elasticsearch/TransportVersion.java @@ -122,16 +122,6 @@ private static TransportVersion registerTransportVersion(int id, String uniqueId * READ THE COMMENT BELOW THIS BLOCK OF DECLARATIONS BEFORE ADDING NEW TRANSPORT VERSIONS * Detached transport versions added below here. */ - public static final TransportVersion V_8_500_010 = registerTransportVersion(8_500_010, "9818C628-1EEC-439B-B943-468F61460675"); - public static final TransportVersion V_8_500_011 = registerTransportVersion(8_500_011, "2209F28D-B52E-4BC4-9889-E780F291C32E"); - public static final TransportVersion V_8_500_012 = registerTransportVersion(8_500_012, "BB6F4AF1-A860-4FD4-A138-8150FFBE0ABD"); - public static final TransportVersion V_8_500_013 = registerTransportVersion(8_500_013, "f65b85ac-db5e-4558-a487-a1dde4f6a33a"); - public static final TransportVersion V_8_500_014 = registerTransportVersion(8_500_014, "D115A2E1-1739-4A02-AB7B-64F6EA157EFB"); - public static final TransportVersion V_8_500_015 = registerTransportVersion(8_500_015, "651216c9-d54f-4189-9fe1-48d82d276863"); - public static final TransportVersion V_8_500_016 = registerTransportVersion(8_500_016, "492C94FB-AAEA-4C9E-8375-BDB67A398584"); - public static final TransportVersion V_8_500_017 = registerTransportVersion(8_500_017, "0EDCB5BA-049C-443C-8AB1-5FA58FB996FB"); - public static final TransportVersion V_8_500_018 = registerTransportVersion(8_500_018, "827C32CE-33D9-4AC3-A773-8FB768F59EAF"); - public static final TransportVersion V_8_500_019 = registerTransportVersion(8_500_019, "09bae57f-cab8-423c-aab3-c9778509ffe3"); public static final TransportVersion V_8_500_020 = registerTransportVersion(8_500_020, "ECB42C26-B258-42E5-A835-E31AF84A76DE"); public static final TransportVersion V_8_500_021 = registerTransportVersion(8_500_021, "102e0d84-0c08-402c-a696-935f3a3da873"); public static final TransportVersion V_8_500_022 = registerTransportVersion(8_500_022, "4993c724-7a81-4955-84e7-403484610091"); @@ -173,7 +163,6 @@ private static TransportVersion registerTransportVersion(int id, String uniqueId public static final TransportVersion V_8_500_058 = registerTransportVersion(8_500_058, "41d9c98a-1de2-4dc1-86f1-abd4cc1bef57"); public static final TransportVersion V_8_500_059 = registerTransportVersion(8_500_059, "2f2090c0-7cd0-4a10-8f02-63d26073604f"); public static final TransportVersion V_8_500_060 = registerTransportVersion(8_500_060, "ec065a44-b468-4f8a-aded-7b90ca8d792b"); - // 8.10.0 release version is: public static final TransportVersion V_8_500_061 = registerTransportVersion(8_500_061, "4e07f830-8be4-448c-851e-62b3d2f0bf0a"); public static final TransportVersion V_8_500_062 = registerTransportVersion(8_500_062, "09CD9C9B-3207-4B40-8756-B7A12001A885"); public 
static final TransportVersion V_8_500_063 = registerTransportVersion(8_500_063, "31dedced-0055-4f34-b952-2f6919be7488"); @@ -183,7 +172,6 @@ private static TransportVersion registerTransportVersion(int id, String uniqueId public static final TransportVersion V_8_500_067 = registerTransportVersion(8_500_067, "a7c86604-a917-4aff-9a1b-a4d44c3dbe02"); public static final TransportVersion V_8_500_068 = registerTransportVersion(8_500_068, "2683c8b4-5372-4a6a-bb3a-d61aa679089a"); public static final TransportVersion V_8_500_069 = registerTransportVersion(8_500_069, "5b804027-d8a0-421b-9970-1f53d766854b"); - /* * STOP! READ THIS FIRST! No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ @@ -203,6 +191,21 @@ private static TransportVersion registerTransportVersion(int id, String uniqueId * * If you revert a commit with a transport version change, you MUST ensure there is a NEW transport version representing the reverted * change. DO NOT let the transport version go backwards, it must ALWAYS be incremented. + * + * DETERMINING TRANSPORT VERSIONS FROM GIT HISTORY + * + * If your git checkout has the expected minor-version-numbered branches and the expected release-version tags then you can find the + * transport versions known by a particular release ... + * + * git show v8.9.1:server/src/main/java/org/elasticsearch/TransportVersion.java | grep registerTransportVersion + * + * ... or by a particular branch ... + * + * git show 8.10:server/src/main/java/org/elasticsearch/TransportVersion.java | grep registerTransportVersion + * + * ... and you can see which versions were added in between two versions too ... + * + * git diff 8.10..main -- server/src/main/java/org/elasticsearch/TransportVersion.java */ private static class CurrentHolder { diff --git a/server/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java b/server/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java index 482f854d98fa0..4800ba191edf7 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java +++ b/server/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java @@ -35,8 +35,8 @@ public class ActionListenerResponseHandler i * @param executor The executor to use to deserialize the response and notify the listener. You must only use * {@link EsExecutors#DIRECT_EXECUTOR_SERVICE} (or equivalently {@link TransportResponseHandler#TRANSPORT_WORKER}) * for very performance-critical actions, and even then only if the deserialization and handling work is very cheap, - * because this executor will perform because this executor will perform all the work for responses from remote nodes on - * the receiving transport worker itself. + * because this executor will perform all the work for responses from remote nodes on the receiving transport worker + * itself. 
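+ * A hypothetical usage sketch (the {@code actionName}, {@code MyResponse} and executor choice are illustrative, not part of this change):
+ * {@code transportService.sendRequest(node, actionName, request, new ActionListenerResponseHandler<>(listener, MyResponse::new, threadPool.executor(ThreadPool.Names.SEARCH)))}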
*/ public ActionListenerResponseHandler(ActionListener listener, Writeable.Reader reader, Executor executor) { this.listener = Objects.requireNonNull(listener); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java index dce305cb840db..e8d2194d34d36 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java @@ -117,7 +117,7 @@ public NodeStats(StreamInput in) throws IOException { ingestStats = in.readOptionalWriteable(IngestStats::read); adaptiveSelectionStats = in.readOptionalWriteable(AdaptiveSelectionStats::new); indexingPressureStats = in.readOptionalWriteable(IndexingPressureStats::new); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_011)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { repositoriesStats = in.readOptionalWriteable(RepositoriesStats::new); } else { repositoriesStats = null; @@ -294,7 +294,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(ingestStats); out.writeOptionalWriteable(adaptiveSelectionStats); out.writeOptionalWriteable(indexingPressureStats); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_011)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { out.writeOptionalWriteable(repositoriesStats); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index f23ee6242b5c8..c33bc841190a0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; @@ -26,6 +25,7 @@ import org.elasticsearch.cluster.metadata.Metadata.Custom; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; @@ -138,9 +138,9 @@ public void onTimeout(TimeValue timeout) { } } - @SuppressForbidden(reason = "exposing ClusterState#transportVersions requires reading them") - private static Map getTransportVersions(ClusterState clusterState) { - return clusterState.transportVersions(); + @SuppressForbidden(reason = "exposing ClusterState#compatibilityVersions requires reading them") + private static Map getCompatibilityVersions(ClusterState clusterState) { + return clusterState.compatibilityVersions(); } private ClusterStateResponse buildResponse(final ClusterStateRequest request, final ClusterState currentState) { @@ -151,7 +151,7 @@ private ClusterStateResponse buildResponse(final ClusterStateRequest request, fi if (request.nodes()) { builder.nodes(currentState.nodes()); - 
builder.transportVersions(getTransportVersions(currentState)); + builder.compatibilityVersions(getCompatibilityVersions(currentState)); } if (request.routingTable()) { if (request.indices().length > 0) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java index a2b9fa6c04f89..42f9cbd5a970b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java @@ -32,6 +32,9 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.SystemDataStreamDescriptor; import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.snapshots.SnapshotInProgressException; @@ -291,9 +294,10 @@ private RolloverResult rolloverDataStream( currentState, createIndexClusterStateRequest, silent, - (builder, indexMetadata) -> builder.put( - dataStream.rollover(indexMetadata.getIndex(), newGeneration, metadata.isTimeSeriesTemplate(templateV2)) - ), + (builder, indexMetadata) -> { + downgradeBrokenTsdbBackingIndices(dataStream, builder); + builder.put(dataStream.rollover(indexMetadata.getIndex(), newGeneration, metadata.isTimeSeriesTemplate(templateV2))); + }, rerouteCompletionIsNotRequired() ); @@ -312,6 +316,30 @@ private RolloverResult rolloverDataStream( return new RolloverResult(newWriteIndexName, originalWriteIndex.getName(), newState); } + /** + * Before rollover, this method fixes tsdb backing indices that have no start and end time index settings set, by + * removing the index.mode and index.routing_path index settings. This downgrades these indices to regular indices. + * Due to a bug, data streams may exist that + * have backing indices with no start and end time index settings set. + * Note that as part of rollover the new backing index will be in tsdb mode.
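+ * For example (an illustrative before/after, not an excerpt from a real cluster): an affected backing index carrying
+ * index.mode=time_series and index.routing_path=[k8s.pod.uid] but neither index.time_series.start_time nor
+ * index.time_series.end_time is left with none of these settings after this method runs, i.e. it becomes a regular index.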
+ */ + private static void downgradeBrokenTsdbBackingIndices(DataStream dataStream, Metadata.Builder builder) { + for (Index indexName : dataStream.getIndices()) { + var index = builder.getSafe(indexName); + final Settings originalSettings = index.getSettings(); + if (IndexVersion.V_8_11_0.after(index.getCreationVersion()) + && index.getIndexMode() == IndexMode.TIME_SERIES + && originalSettings.keySet().contains(IndexSettings.TIME_SERIES_START_TIME.getKey()) == false + && originalSettings.keySet().contains(IndexSettings.TIME_SERIES_END_TIME.getKey()) == false) { + final Settings.Builder settingsBuilder = Settings.builder().put(originalSettings); + settingsBuilder.remove(IndexSettings.MODE.getKey()); + settingsBuilder.remove(IndexMetadata.INDEX_ROUTING_PATH.getKey()); + long newVersion = index.getSettingsVersion() + 1; + builder.put(IndexMetadata.builder(index).settings(settingsBuilder.build()).settingsVersion(newVersion)); + } + } + } + public Metadata.Builder withShardSizeForecastForWriteIndex(String dataStreamName, Metadata.Builder metadata) { final DataStream dataStream = metadata.dataStream(dataStreamName); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java index 3f8b005ca13e5..ae84242eeb678 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java @@ -60,7 +60,7 @@ public ShardStats(StreamInput in) throws IOException { isCustomDataPath = in.readBoolean(); seqNoStats = in.readOptionalWriteable(SeqNoStats::new); retentionLeaseStats = in.readOptionalWriteable(RetentionLeaseStats::new); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { isSearchIdle = in.readBoolean(); searchIdleTime = in.readVLong(); } else { @@ -214,7 +214,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(isCustomDataPath); out.writeOptionalWriteable(seqNoStats); out.writeOptionalWriteable(retentionLeaseStats); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { out.writeBoolean(isSearchIdle); out.writeVLong(searchIdleTime); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java index 3ae9da3853f1f..2ccaa58dd3a09 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java @@ -57,7 +57,7 @@ public Request(String name) { public Request(StreamInput in) throws IOException { super(in); name = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { includeDefaults = in.readBoolean(); } else { includeDefaults = false; @@ -68,7 +68,7 @@ public Request(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeOptionalString(name); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if 
(out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { out.writeBoolean(includeDefaults); } } @@ -121,7 +121,7 @@ public static class Response extends ActionResponse implements ToXContentObject public Response(StreamInput in) throws IOException { super(in); componentTemplates = in.readMap(ComponentTemplate::new); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { rolloverConfiguration = in.readOptionalWriteable(RolloverConfiguration::new); } else { rolloverConfiguration = null; @@ -149,7 +149,7 @@ public RolloverConfiguration getRolloverConfiguration() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeMap(componentTemplates, StreamOutput::writeWriteable); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { out.writeOptionalWriteable(rolloverConfiguration); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java index 89c0ec366d363..1f9a9008703b2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java @@ -58,7 +58,7 @@ public Request(@Nullable String name) { public Request(StreamInput in) throws IOException { super(in); name = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { includeDefaults = in.readBoolean(); } else { includeDefaults = false; @@ -69,7 +69,7 @@ public Request(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeOptionalString(name); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { out.writeBoolean(includeDefaults); } } @@ -123,7 +123,7 @@ public static class Response extends ActionResponse implements ToXContentObject public Response(StreamInput in) throws IOException { super(in); indexTemplates = in.readMap(ComposableIndexTemplate::new); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { rolloverConfiguration = in.readOptionalWriteable(RolloverConfiguration::new); } else { rolloverConfiguration = null; @@ -147,7 +147,7 @@ public Map indexTemplates() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeMap(indexTemplates, StreamOutput::writeWriteable); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { out.writeOptionalWriteable(rolloverConfiguration); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java index 08097d5002db8..f62b8cbb3bdb0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java @@ -40,7 +40,7 @@ public SimulateIndexTemplateRequest(StreamInput in) throws IOException { super(in); indexName = in.readString(); indexTemplateRequest = in.readOptionalWriteable(PutComposableIndexTemplateAction.Request::new); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { includeDefaults = in.readBoolean(); } } @@ -50,7 +50,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(indexName); out.writeOptionalWriteable(indexTemplateRequest); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { out.writeBoolean(includeDefaults); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java index 67ee242b062be..92d765dac8ee2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java @@ -73,7 +73,7 @@ public SimulateIndexTemplateResponse(StreamInput in) throws IOException { } else { this.overlappingTemplates = null; } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { rolloverConfiguration = in.readOptionalWriteable(RolloverConfiguration::new); } } @@ -91,7 +91,7 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeBoolean(false); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { out.writeOptionalWriteable(rolloverConfiguration); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java index 1470042091169..3dac322cd35dc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java @@ -63,7 +63,7 @@ public Request(StreamInput in) throws IOException { super(in); templateName = in.readOptionalString(); indexTemplateRequest = in.readOptionalWriteable(PutComposableIndexTemplateAction.Request::new); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { includeDefaults = in.readBoolean(); } } @@ -73,7 +73,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeOptionalString(templateName); out.writeOptionalWriteable(indexTemplateRequest); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { out.writeBoolean(includeDefaults); } } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java index 85ebd93f61317..41e926ceaeec0 
100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java @@ -71,7 +71,7 @@ public Request(StreamInput in) throws IOException { super(in); this.names = in.readOptionalStringArray(); this.indicesOptions = IndicesOptions.readIndicesOptions(in); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { this.includeDefaults = in.readBoolean(); } else { this.includeDefaults = false; @@ -83,7 +83,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeOptionalStringArray(names); indicesOptions.writeIndicesOptions(out); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { out.writeBoolean(includeDefaults); } } @@ -342,7 +342,7 @@ public Response(List dataStreams, @Nullable RolloverConfiguratio public Response(StreamInput in) throws IOException { this( in.readCollectionAsList(DataStreamInfo::new), - in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010) + in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020) ? in.readOptionalWriteable(RolloverConfiguration::new) : null ); @@ -360,7 +360,7 @@ public RolloverConfiguration getRolloverConfiguration() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeCollection(dataStreams); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { out.writeOptionalWriteable(rolloverConfiguration); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java index a8cb8002a80d8..03d565c9a97a5 100644 --- a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java @@ -51,7 +51,7 @@ public OpenPointInTimeRequest(StreamInput in) throws IOException { this.keepAlive = in.readTimeValue(); this.routing = in.readOptionalString(); this.preference = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_017)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { this.maxConcurrentShardRequests = in.readVInt(); } } @@ -64,7 +64,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeTimeValue(keepAlive); out.writeOptionalString(routing); out.writeOptionalString(preference); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_017)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { out.writeVInt(maxConcurrentShardRequests); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java index 07fed9598ea8b..9c78e5ad62aea 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java @@ -21,7 +21,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import 
org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchPhaseResult; @@ -167,10 +167,29 @@ protected void executePhaseOnShard( @Override protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { return new SearchPhase(getName()) { + + private void onExecuteFailure(Exception e) { + onPhaseFailure(this, "sending response failed", e); + } + @Override public void run() { - final AtomicArray atomicArray = results.getAtomicArray(); - sendSearchResponse(InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, atomicArray); + execute(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + onExecuteFailure(e); + } + + @Override + protected void doRun() { + sendSearchResponse(InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, results.getAtomicArray()); + } + + @Override + public boolean isForceExecution() { + return true; // we already created the PIT, no sense in rejecting the task that sends the response. + } + }); } }; } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index a185b9acadb8c..55666ac3f1918 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -691,7 +691,7 @@ Map createFinalResponse() { final String[] indices = entry.getValue().indices(); final Executor responseExecutor = transportService.getThreadPool().executor(ThreadPool.Names.SEARCH_COORDINATION); // TODO: support point-in-time - if (searchContext == null && connection.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (searchContext == null && connection.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { SearchShardsRequest searchShardsRequest = new SearchShardsRequest( indices, indicesOptions, diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/unpromotable/BroadcastUnpromotableRequest.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/unpromotable/BroadcastUnpromotableRequest.java index 64c6b3e98bc8b..b1a6822ac37da 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/unpromotable/BroadcastUnpromotableRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/unpromotable/BroadcastUnpromotableRequest.java @@ -46,7 +46,7 @@ public BroadcastUnpromotableRequest(StreamInput in) throws IOException { indexShardRoutingTable = null; shardId = new ShardId(in); indices = new String[] { shardId.getIndex().getName() }; - failShardOnError = in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010) && in.readBoolean(); + failShardOnError = in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020) && in.readBoolean(); } public BroadcastUnpromotableRequest(IndexShardRoutingTable indexShardRoutingTable) { diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index 59d2a94a21c92..0c165468dfba5 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.settings.SecureSettings; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.common.util.concurrent.RunOnce; import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.SuppressForbidden; @@ -188,6 +189,7 @@ private static void initPhase2(Bootstrap bootstrap) throws IOException { // The following classes use MethodHandles.lookup during initialization, load them now (before SM) to be sure they succeed AbstractRefCounted.class, SubscribableListener.class, + RunOnce.class, // We eagerly initialize to work around log4j permissions & JDK-8309727 VectorUtil.class ); diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index 5bd4469935b6f..e45fc2d7da0e9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.service.ClusterApplierService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.MasterService; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; @@ -48,7 +49,6 @@ import java.io.IOException; import java.util.Collections; -import java.util.Comparator; import java.util.EnumSet; import java.util.HashMap; import java.util.Iterator; @@ -128,16 +128,16 @@ default boolean isPrivate() { private static final NamedDiffableValueSerializer CUSTOM_VALUE_SERIALIZER = new NamedDiffableValueSerializer<>(Custom.class); - private static final DiffableUtils.ValueSerializer TRANSPORT_VERSION_VALUE_SERIALIZER = + private static final DiffableUtils.ValueSerializer COMPATIBILITY_VERSIONS_VALUE_SERIALIZER = new DiffableUtils.NonDiffableValueSerializer<>() { @Override - public void write(TransportVersion value, StreamOutput out) throws IOException { - TransportVersion.writeVersion(value, out); + public void write(CompatibilityVersions value, StreamOutput out) throws IOException { + TransportVersion.writeVersion(value.transportVersion(), out); } @Override - public TransportVersion read(StreamInput in, String key) throws IOException { - return TransportVersion.readVersion(in); + public CompatibilityVersions read(StreamInput in, String key) throws IOException { + return new CompatibilityVersions(TransportVersion.readVersion(in)); } }; @@ -163,8 +163,8 @@ public TransportVersion read(StreamInput in, String key) throws IOException { private final DiscoveryNodes nodes; - private final Map transportVersions; - private final TransportVersion minTransportVersion; + private final Map compatibilityVersions; + private final CompatibilityVersions minVersions; private final Metadata metadata; @@ -187,7 +187,7 @@ public ClusterState(long version, String stateUUID, ClusterState state) { state.metadata(), state.routingTable(), state.nodes(), - state.transportVersions, + state.compatibilityVersions, state.blocks(), state.customs(), false, @@ -202,7 +202,7 @@ public ClusterState( Metadata metadata, RoutingTable routingTable, DiscoveryNodes nodes, - Map transportVersions, + Map compatibilityVersions, ClusterBlocks blocks, Map customs, boolean wasReadFromDiff, @@ -214,20 +214,16 @@ public ClusterState( this.metadata = metadata; this.routingTable = routingTable; this.nodes = nodes; - this.transportVersions = 
Map.copyOf(transportVersions); + this.compatibilityVersions = Map.copyOf(compatibilityVersions); this.blocks = blocks; this.customs = customs; this.wasReadFromDiff = wasReadFromDiff; this.routingNodes = routingNodes; assert assertConsistentRoutingNodes(routingTable, nodes, routingNodes); - this.minTransportVersion = blocks.hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) - ? TransportVersion.MINIMUM_COMPATIBLE - : transportVersions.values() - .stream() - .min(Comparator.naturalOrder()) - // In practice transportVersions is always nonempty (except in tests) but use a conservative default anyway: - .orElse(TransportVersion.MINIMUM_COMPATIBLE); + this.minVersions = blocks.hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) + ? new CompatibilityVersions(TransportVersion.MINIMUM_COMPATIBLE) + : CompatibilityVersions.minimumVersions(compatibilityVersions); } private static boolean assertConsistentRoutingNodes( @@ -283,12 +279,12 @@ public DiscoveryNodes nodesIfRecovered() { return blocks.hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) ? DiscoveryNodes.EMPTY_NODES : nodes; } - public Map transportVersions() { - return this.transportVersions; + public Map compatibilityVersions() { + return this.compatibilityVersions; } public TransportVersion getMinTransportVersion() { - return this.minTransportVersion; + return this.minVersions.transportVersion(); } public Metadata metadata() { @@ -477,9 +473,9 @@ public String toString() { } sb.append(blocks()); sb.append(nodes()); - if (transportVersions.isEmpty() == false) { - sb.append("transport versions:\n"); - for (var tv : transportVersions.entrySet()) { + if (compatibilityVersions.isEmpty() == false) { + sb.append("node versions:\n"); + for (var tv : compatibilityVersions.entrySet()) { sb.append(TAB).append(tv.getKey()).append(": ").append(tv.getValue()).append("\n"); } } @@ -633,21 +629,34 @@ public Iterator toXContentChunked(ToXContent.Params outerP (builder, params) -> builder.endObject() ), - // transportVersions + // transportVersions - redundant with the nodes_versions section but has to stay for backwards compatibility // just use NODES again, its node-related information chunkedSection( metrics.contains(Metric.NODES), (builder, params) -> builder.startArray("transport_versions"), - transportVersions.entrySet().iterator(), + compatibilityVersions.entrySet().iterator(), e -> Iterators.single( (builder, params) -> builder.startObject() .field("node_id", e.getKey()) - .field("transport_version", e.getValue().toString()) + .field("transport_version", e.getValue().transportVersion().toString()) .endObject() ), (builder, params) -> builder.endArray() ), + // per-node version information + chunkedSection( + metrics.contains(Metric.NODES), + (builder, params) -> builder.startArray("nodes_versions"), + compatibilityVersions.entrySet().iterator(), + e -> Iterators.single((builder, params) -> { + builder.startObject().field("node_id", e.getKey()); + e.getValue().toXContent(builder, params); + return builder.endObject(); + }), + (builder, params) -> builder.endArray() + ), + // metadata metrics.contains(Metric.METADATA) ? 
metadata.toXContentChunked(outerParams) : Collections.emptyIterator(), @@ -740,7 +749,7 @@ public static class Builder { private Metadata metadata = Metadata.EMPTY_METADATA; private RoutingTable routingTable = RoutingTable.EMPTY_ROUTING_TABLE; private DiscoveryNodes nodes = DiscoveryNodes.EMPTY_NODES; - private final Map transportVersions; + private final Map compatibilityVersions; private ClusterBlocks blocks = ClusterBlocks.EMPTY_CLUSTER_BLOCK; private final ImmutableOpenMap.Builder customs; private boolean fromDiff; @@ -751,7 +760,7 @@ public Builder(ClusterState state) { this.version = state.version(); this.uuid = state.stateUUID(); this.nodes = state.nodes(); - this.transportVersions = new HashMap<>(state.transportVersions); + this.compatibilityVersions = new HashMap<>(state.compatibilityVersions); this.routingTable = state.routingTable(); this.metadata = state.metadata(); this.blocks = state.blocks(); @@ -760,7 +769,7 @@ public Builder(ClusterState state) { } public Builder(ClusterName clusterName) { - this.transportVersions = new HashMap<>(); + this.compatibilityVersions = new HashMap<>(); customs = ImmutableOpenMap.builder(); this.clusterName = clusterName; } @@ -778,21 +787,21 @@ public DiscoveryNodes nodes() { return nodes; } - public Builder putTransportVersion(String nodeId, TransportVersion version) { - transportVersions.put(nodeId, Objects.requireNonNull(version, nodeId)); + public Builder putTransportVersion(String nodeId, TransportVersion transportVersion) { + compatibilityVersions.put(nodeId, new CompatibilityVersions(Objects.requireNonNull(transportVersion, nodeId))); return this; } - public Builder transportVersions(Map versions) { + public Builder compatibilityVersions(Map versions) { versions.forEach((key, value) -> Objects.requireNonNull(value, key)); // remove all versions not present in the new map - this.transportVersions.keySet().retainAll(versions.keySet()); - this.transportVersions.putAll(versions); + this.compatibilityVersions.keySet().retainAll(versions.keySet()); + this.compatibilityVersions.putAll(versions); return this; } - public Map transportVersions() { - return Collections.unmodifiableMap(this.transportVersions); + public Map compatibilityVersions() { + return Collections.unmodifiableMap(this.compatibilityVersions); } public Builder routingTable(RoutingTable.Builder routingTableBuilder) { @@ -880,7 +889,7 @@ public ClusterState build() { metadata, routingTable, nodes, - transportVersions, + compatibilityVersions, blocks, customs.build(), fromDiff, @@ -923,7 +932,7 @@ public static ClusterState readFrom(StreamInput in, DiscoveryNode localNode) thr builder.routingTable = RoutingTable.readFrom(in); builder.nodes = DiscoveryNodes.readFrom(in, localNode); if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { - builder.transportVersions(in.readMap(TransportVersion::readVersion)); + builder.compatibilityVersions(in.readMap(CompatibilityVersions::readVersion)); } else { // this clusterstate is from a pre-8.8.0 node // infer the versions from discoverynodes for now @@ -968,7 +977,7 @@ public void writeTo(StreamOutput out) throws IOException { routingTable.writeTo(out); nodes.writeTo(out); if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { - out.writeMap(transportVersions, (o, v) -> TransportVersion.writeVersion(v, o)); + out.writeMap(compatibilityVersions, (streamOutput, versions) -> versions.writeTo(streamOutput)); } blocks.writeTo(out); VersionedNamedWriteable.writeVersionedWritables(out, customs); @@ -992,7 +1001,7 @@ private 
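Builder#compatibilityVersions above replaces the map's contents rather than merging into them: keys missing from the argument are dropped first, then everything else overwrites. The same retainAll/putAll idiom in isolation:

    import java.util.*;

    public class ReplaceContentsSketch {
        public static void main(String[] args) {
            Map<String, String> current = new HashMap<>(Map.of("n1", "v1", "n2", "v1"));
            Map<String, String> incoming = Map.of("n2", "v2", "n3", "v1");

            incoming.forEach((k, v) -> Objects.requireNonNull(v, k)); // null values rejected up front
            current.keySet().retainAll(incoming.keySet()); // drop nodes no longer present: removes n1
            current.putAll(incoming);                      // add or overwrite the rest

            System.out.println(current); // {n2=v2, n3=v1} (iteration order not guaranteed)
        }
    }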
static class ClusterStateDiff implements Diff { private final Diff nodes; @Nullable - private final Diff> transportVersions; + private final Diff> versions; private final Diff metadata; @@ -1007,11 +1016,11 @@ private static class ClusterStateDiff implements Diff { clusterName = after.clusterName; routingTable = after.routingTable.diff(before.routingTable); nodes = after.nodes.diff(before.nodes); - transportVersions = DiffableUtils.diff( - before.transportVersions, - after.transportVersions, + versions = DiffableUtils.diff( + before.compatibilityVersions, + after.compatibilityVersions, DiffableUtils.getStringKeySerializer(), - TRANSPORT_VERSION_VALUE_SERIALIZER + COMPATIBILITY_VERSIONS_VALUE_SERIALIZER ); metadata = after.metadata.diff(before.metadata); blocks = after.blocks.diff(before.blocks); @@ -1026,13 +1035,13 @@ private static class ClusterStateDiff implements Diff { routingTable = RoutingTable.readDiffFrom(in); nodes = DiscoveryNodes.readDiffFrom(in, localNode); if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0) && in.readBoolean()) { - transportVersions = DiffableUtils.readJdkMapDiff( + versions = DiffableUtils.readJdkMapDiff( in, DiffableUtils.getStringKeySerializer(), - TRANSPORT_VERSION_VALUE_SERIALIZER + COMPATIBILITY_VERSIONS_VALUE_SERIALIZER ); } else { - transportVersions = null; // infer at application time + versions = null; // infer at application time } metadata = Metadata.readDiffFrom(in); blocks = ClusterBlocks.readDiffFrom(in); @@ -1051,7 +1060,7 @@ public void writeTo(StreamOutput out) throws IOException { routingTable.writeTo(out); nodes.writeTo(out); if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { - out.writeOptionalWriteable(transportVersions); + out.writeOptionalWriteable(versions); } metadata.writeTo(out); blocks.writeTo(out); @@ -1075,8 +1084,8 @@ public ClusterState apply(ClusterState state) { builder.version(toVersion); builder.routingTable(routingTable.apply(state.routingTable)); builder.nodes(nodes.apply(state.nodes)); - if (transportVersions != null) { - builder.transportVersions(transportVersions.apply(state.transportVersions)); + if (versions != null) { + builder.compatibilityVersions(this.versions.apply(state.compatibilityVersions)); } else { // infer the versions from discoverynodes for now builder.nodes().getNodes().values().forEach(n -> builder.putTransportVersion(n.getId(), inferTransportVersion(n))); diff --git a/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java b/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java index afb997eeb20fa..452aae4a1c467 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java @@ -79,7 +79,7 @@ public void setClient(Client client) { * potentially waiting for a master node to be available. 
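The ClusterStateDiff above writes the versions diff only to peers on or after V_8_8_0, preceded by a presence flag; older peers receive nothing and the reader infers the value at apply time. A minimal sketch of that wire pattern with plain streams and an illustrative numeric version constant:

    import java.io.*;

    public class OptionalDiffSketch {
        static final int V_8_8_0 = 8_08_00; // illustrative numeric stand-in

        static void write(int streamVersion, Integer versionsDiff, DataOutput out) throws IOException {
            if (streamVersion >= V_8_8_0) {
                out.writeBoolean(versionsDiff != null); // presence flag, like writeOptionalWriteable
                if (versionsDiff != null) {
                    out.writeInt(versionsDiff);
                }
            } // else: omit entirely; the receiver infers from its node list at apply time
        }

        static Integer read(int streamVersion, DataInput in) throws IOException {
            if (streamVersion >= V_8_8_0 && in.readBoolean()) {
                return in.readInt();
            }
            return null; // infer at application time
        }

        public static void main(String[] args) throws IOException {
            var bytes = new ByteArrayOutputStream();
            write(V_8_8_0, 42, new DataOutputStream(bytes));
            var in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
            System.out.println(read(V_8_8_0, in)); // 42
        }
    }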
*/ public void updateMappingOnMaster(Index index, Mapping mappingUpdate, ActionListener listener) { - final RunOnce release = new RunOnce(() -> semaphore.release()); + final RunOnce release = new RunOnce(semaphore::release); try { semaphore.acquire(); } catch (InterruptedException e) { diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java index 5aa6dc8ab4729..26b8dc77eb65a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RerouteService; import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.Priority; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.index.IndexVersion; @@ -120,7 +121,7 @@ public ClusterState execute(BatchExecutionContext batchExecutionContex } DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(newState.nodes()); - Map transportVersions = new HashMap<>(newState.transportVersions()); + Map compatibilityVersionsMap = new HashMap<>(newState.compatibilityVersions()); assert nodesBuilder.isLocalNodeElectedMaster(); @@ -139,18 +140,18 @@ public ClusterState execute(BatchExecutionContext batchExecutionContex logger.debug("received a join request for an existing node [{}]", node); } else { try { - TransportVersion transportVersion = nodeJoinTask.transportVersion(); + CompatibilityVersions compatibilityVersions = new CompatibilityVersions(nodeJoinTask.transportVersion()); if (enforceVersionBarrier) { ensureVersionBarrier(node.getVersion(), minClusterNodeVersion); - ensureTransportVersionBarrier(transportVersion, transportVersions.values()); + ensureTransportVersionBarrier(compatibilityVersions, compatibilityVersionsMap.values()); } - blockForbiddenVersions(transportVersion); + blockForbiddenVersions(compatibilityVersions.transportVersion()); ensureNodesCompatibility(node.getVersion(), minClusterNodeVersion, maxClusterNodeVersion); // we do this validation quite late to prevent race conditions between nodes joining and importing dangling indices // we have to reject nodes that don't support all indices we have in this cluster ensureIndexCompatibility(node.getMinIndexVersion(), node.getMaxIndexVersion(), initialState.getMetadata()); nodesBuilder.add(node); - transportVersions.put(node.getId(), transportVersion); + compatibilityVersionsMap.put(node.getId(), compatibilityVersions); nodesChanged = true; minClusterNodeVersion = Version.min(minClusterNodeVersion, node.getVersion()); maxClusterNodeVersion = Version.max(maxClusterNodeVersion, node.getVersion()); @@ -221,7 +222,7 @@ public ClusterState execute(BatchExecutionContext batchExecutionContex } final ClusterState clusterStateWithNewNodesAndDesiredNodes = DesiredNodes.updateDesiredNodesStatusIfNeeded( - newState.nodes(nodesBuilder).transportVersions(transportVersions).build() + newState.nodes(nodesBuilder).compatibilityVersions(compatibilityVersionsMap).build() ); final ClusterState updatedState = allocationService.adaptAutoExpandReplicas(clusterStateWithNewNodesAndDesiredNodes); assert enforceVersionBarrier == false @@ -239,9 +240,9 @@ public ClusterState execute(BatchExecutionContext batchExecutionContex } } - 
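The MappingUpdatedAction change above is cosmetic (a method reference instead of a lambda), but the RunOnce wrapper itself is what guarantees the semaphore permit is released exactly once across success and failure paths. A self-contained sketch with a simplified RunOnce included for completeness:

    import java.util.concurrent.Semaphore;
    import java.util.concurrent.atomic.AtomicReference;

    public class ReleaseOnceSketch {
        static final class RunOnce implements Runnable {
            private final AtomicReference<Runnable> delegate;

            RunOnce(Runnable r) {
                delegate = new AtomicReference<>(r);
            }

            public void run() {
                Runnable r = delegate.getAndSet(null); // only the first caller gets it
                if (r != null) r.run();
            }
        }

        public static void main(String[] args) throws InterruptedException {
            Semaphore semaphore = new Semaphore(1);
            semaphore.acquire();
            RunOnce release = new RunOnce(semaphore::release); // method reference, as in the diff
            release.run();
            release.run(); // second call is a no-op: no double-release
            System.out.println("permits: " + semaphore.availablePermits()); // 1
        }
    }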
@SuppressForbidden(reason = "maintaining ClusterState#transportVersions requires reading them") - private static Map getTransportVersions(ClusterState clusterState) { - return clusterState.transportVersions(); + @SuppressForbidden(reason = "maintaining ClusterState#compatibilityVersions requires reading them") + private static Map getCompatibilityVersions(ClusterState clusterState) { + return clusterState.compatibilityVersions(); } protected ClusterState.Builder becomeMasterAndTrimConflictingNodes( @@ -265,7 +266,7 @@ protected ClusterState.Builder becomeMasterAndTrimConflictingNodes( assert currentState.term() < term : term + " vs " + currentState; DiscoveryNodes currentNodes = currentState.nodes(); DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentNodes); - Map transportVersions = new HashMap<>(getTransportVersions(currentState)); + Map compatibilityVersions = new HashMap<>(getCompatibilityVersions(currentState)); nodesBuilder.masterNodeId(currentState.nodes().getLocalNodeId()); nodesBuilder.resetNodeLeftGeneration(); @@ -275,7 +276,7 @@ protected ClusterState.Builder becomeMasterAndTrimConflictingNodes( if (nodeWithSameId != null && nodeWithSameId.equals(joiningNode) == false) { logger.debug("removing existing node [{}], which conflicts with incoming join from [{}]", nodeWithSameId, joiningNode); nodesBuilder.remove(nodeWithSameId.getId()); - transportVersions.remove(nodeWithSameId.getId()); + compatibilityVersions.remove(nodeWithSameId.getId()); } final DiscoveryNode nodeWithSameAddress = currentNodes.findByAddress(joiningNode.getAddress()); if (nodeWithSameAddress != null && nodeWithSameAddress.equals(joiningNode) == false) { @@ -285,7 +286,7 @@ protected ClusterState.Builder becomeMasterAndTrimConflictingNodes( joiningNode ); nodesBuilder.remove(nodeWithSameAddress.getId()); - transportVersions.remove(nodeWithSameAddress.getId()); + compatibilityVersions.remove(nodeWithSameAddress.getId()); } } } @@ -294,7 +295,7 @@ protected ClusterState.Builder becomeMasterAndTrimConflictingNodes( // or removed by us above ClusterState tmpState = ClusterState.builder(currentState) .nodes(nodesBuilder) - .transportVersions(transportVersions) + .compatibilityVersions(compatibilityVersions) .blocks(ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_ID)) .metadata( Metadata.builder(currentState.metadata()) @@ -394,16 +395,17 @@ public static void ensureNodesCompatibility(Version joiningNodeVersion, Version * to ensure that the minimum transport version of the cluster doesn't go backwards. 
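becomeMasterAndTrimConflictingNodes above now removes a trimmed node's entry from the compatibility-versions map as well as from the node map; letting the two drift apart would leave stale version entries behind. The same lock-step bookkeeping in miniature:

    import java.util.*;

    public class TrimSketch {
        public static void main(String[] args) {
            Map<String, String> nodes = new HashMap<>(Map.of("id1", "old-node", "id2", "node-2"));
            Map<String, Integer> versions = new HashMap<>(Map.of("id1", 8_500_010, "id2", 8_500_020));

            String conflicting = "id1"; // same id, different incarnation rejoining
            nodes.remove(conflicting);
            versions.remove(conflicting);

            System.out.println(nodes.keySet().equals(versions.keySet())); // true: still consistent
        }
    }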
**/ static void ensureTransportVersionBarrier( - TransportVersion joiningTransportVersion, - Collection existingTransportVersions + CompatibilityVersions joiningCompatibilityVersions, + Collection existingTransportVersions ) { TransportVersion minClusterTransportVersion = existingTransportVersions.stream() + .map(CompatibilityVersions::transportVersion) .min(Comparator.naturalOrder()) .orElse(TransportVersion.current()); - if (joiningTransportVersion.before(minClusterTransportVersion)) { + if (joiningCompatibilityVersions.transportVersion().before(minClusterTransportVersion)) { throw new IllegalStateException( "node with transport version [" - + joiningTransportVersion + + joiningCompatibilityVersions.transportVersion() + "] may not join a cluster with minimum transport version [" + minClusterTransportVersion + "]" diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java index 995066106e8ca..68c611aeef9a6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateTaskListener; @@ -18,6 +17,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.MasterService; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; @@ -50,23 +50,23 @@ public NodeLeftExecutor(AllocationService allocationService) { this.allocationService = allocationService; } - @SuppressForbidden(reason = "maintaining ClusterState#transportVersions requires reading them") - private static Map getTransportVersions(ClusterState clusterState) { - return clusterState.transportVersions(); + @SuppressForbidden(reason = "maintaining ClusterState#compatibilityVersions requires reading them") + private static Map getCompatibilityVersions(ClusterState clusterState) { + return clusterState.compatibilityVersions(); } @Override public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { ClusterState initialState = batchExecutionContext.initialState(); DiscoveryNodes.Builder remainingNodesBuilder = DiscoveryNodes.builder(initialState.nodes()); - Map transportVersions = new HashMap<>(getTransportVersions(initialState)); + Map compatibilityVersions = new HashMap<>(getCompatibilityVersions(initialState)); boolean removed = false; for (final var taskContext : batchExecutionContext.taskContexts()) { final var task = taskContext.getTask(); final String reason; if (initialState.nodes().nodeExists(task.node())) { remainingNodesBuilder.remove(task.node()); - transportVersions.remove(task.node().getId()); + compatibilityVersions.remove(task.node().getId()); removed = true; reason = task.reason(); } else { @@ -89,7 +89,7 @@ public ClusterState execute(BatchExecutionContext batchExecutionContext) t try (var ignored = batchExecutionContext.dropHeadersContext()) { // suppress deprecation warnings e.g. 
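ensureTransportVersionBarrier above rejects joiners whose transport version would drag the cluster minimum backwards. A simplified, int-valued sketch of the check (the real code defaults an empty cluster to TransportVersion.current(); Integer.MAX_VALUE stands in for that here):

    import java.util.*;

    public class VersionBarrierSketch {
        record CompatibilityVersions(int transportVersion) {}

        static void ensureTransportVersionBarrier(CompatibilityVersions joining, Collection<CompatibilityVersions> existing) {
            int minCluster = existing.stream()
                .mapToInt(CompatibilityVersions::transportVersion)
                .min()
                .orElse(Integer.MAX_VALUE); // stand-in for TransportVersion.current() on an empty cluster
            if (joining.transportVersion() < minCluster) {
                throw new IllegalStateException(
                    "node with transport version [" + joining.transportVersion()
                        + "] may not join a cluster with minimum transport version [" + minCluster + "]"
                );
            }
        }

        public static void main(String[] args) {
            var existing = List.of(new CompatibilityVersions(8_500_020), new CompatibilityVersions(8_500_026));
            ensureTransportVersionBarrier(new CompatibilityVersions(8_500_026), existing); // ok
            try {
                ensureTransportVersionBarrier(new CompatibilityVersions(8_400_000), existing);
            } catch (IllegalStateException e) {
                System.out.println(e.getMessage());
            }
        }
    }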
from reroute() - final var remainingNodesClusterState = remainingNodesClusterState(initialState, remainingNodesBuilder, transportVersions); + final var remainingNodesClusterState = remainingNodesClusterState(initialState, remainingNodesBuilder, compatibilityVersions); final var ptasksDisassociatedState = PersistentTasksCustomMetadata.disassociateDeadNodes(remainingNodesClusterState); return allocationService.disassociateDeadNodes( ptasksDisassociatedState, @@ -105,9 +105,9 @@ public ClusterState execute(BatchExecutionContext batchExecutionContext) t protected ClusterState remainingNodesClusterState( ClusterState currentState, DiscoveryNodes.Builder remainingNodesBuilder, - Map transportVersions + Map compatibilityVersions ) { - return ClusterState.builder(currentState).nodes(remainingNodesBuilder).transportVersions(transportVersions).build(); + return ClusterState.builder(currentState).nodes(remainingNodesBuilder).compatibilityVersions(compatibilityVersions).build(); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java index f7cf0d53cc93c..4483808e595f3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java @@ -789,7 +789,7 @@ public DataStream(StreamInput in) throws IOException { in.readBoolean(), in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0) ? in.readBoolean() : false, in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0) ? in.readOptionalEnum(IndexMode.class) : null, - in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010) ? in.readOptionalWriteable(DataStreamLifecycle::new) : null + in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020) ? 
in.readOptionalWriteable(DataStreamLifecycle::new) : null ); } @@ -818,7 +818,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { out.writeOptionalEnum(indexMode); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { out.writeOptionalWriteable(lifecycle); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java index 6f24fe94387e9..953e34050e537 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java @@ -172,7 +172,7 @@ public int hashCode() { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { out.writeOptionalWriteable(dataRetention); } if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_026)) { @@ -184,7 +184,7 @@ public void writeTo(StreamOutput out) throws IOException { } public DataStreamLifecycle(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { dataRetention = in.readOptionalWriteable(Retention::read); } else { dataRetention = null; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index 84acb68f183e4..0a0f6c375f69a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -1233,8 +1233,7 @@ public Map templatesV2() { } public boolean isTimeSeriesTemplate(ComposableIndexTemplate indexTemplate) { - var template = indexTemplate.template(); - if (indexTemplate.getDataStreamTemplate() == null || template == null) { + if (indexTemplate.getDataStreamTemplate() == null) { return false; } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java index c94178488ed3f..6c58dbea51d1e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java @@ -34,8 +34,8 @@ public class SingleNodeShutdownMetadata implements SimpleDiffable, ToXContentObject { public static final TransportVersion REPLACE_SHUTDOWN_TYPE_ADDED_VERSION = TransportVersion.V_7_16_0; - public static final TransportVersion SIGTERM_ADDED_VERSION = TransportVersion.V_8_500_010; - public static final TransportVersion GRACE_PERIOD_ADDED_VERSION = TransportVersion.V_8_500_010; + public static final TransportVersion SIGTERM_ADDED_VERSION = TransportVersion.V_8_500_020; + public static final TransportVersion GRACE_PERIOD_ADDED_VERSION = TransportVersion.V_8_500_020; public static final ParseField NODE_ID_FIELD = new ParseField("node_id"); public static final ParseField TYPE_FIELD = new ParseField("type"); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java 
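The DataStream, DataStreamLifecycle, and SingleNodeShutdownMetadata hunks above all move the wire gate for their optional fields from V_8_500_010 to V_8_500_020. A sketch of the read side of such a gate, with a hypothetical version constant and a stand-in lifecycle record:

    import java.io.*;

    public class GatedFieldSketch {
        static final int V_8_500_020 = 8_500_020; // illustrative numeric stand-in

        record Lifecycle(long retentionMillis) {}

        static Lifecycle readOptionalLifecycle(int streamVersion, DataInput in) throws IOException {
            if (streamVersion >= V_8_500_020) {
                return in.readBoolean() ? new Lifecycle(in.readLong()) : null; // optional-writeable shape
            }
            return null; // older peers never send the field
        }

        public static void main(String[] args) throws IOException {
            var bytes = new ByteArrayOutputStream();
            var out = new DataOutputStream(bytes);
            out.writeBoolean(true);
            out.writeLong(30L * 24 * 60 * 60 * 1000); // 30 days of retention
            var in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
            System.out.println(readOptionalLifecycle(V_8_500_020, in)); // Lifecycle[retentionMillis=2592000000]
        }
    }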
b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java index 85364a4543784..7f43f8a15930b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java @@ -123,7 +123,7 @@ public Template(StreamInput in) throws IOException { } if (in.getTransportVersion().onOrAfter(DataStreamLifecycle.ADDED_ENABLED_FLAG_VERSION)) { this.lifecycle = in.readOptionalWriteable(DataStreamLifecycle::new); - } else if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + } else if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { boolean isExplicitNull = in.readBoolean(); if (isExplicitNull) { this.lifecycle = DataStreamLifecycle.newBuilder().enabled(false).build(); @@ -177,7 +177,7 @@ public void writeTo(StreamOutput out) throws IOException { } if (out.getTransportVersion().onOrAfter(DataStreamLifecycle.ADDED_ENABLED_FLAG_VERSION)) { out.writeOptionalWriteable(lifecycle); - } else if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + } else if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { boolean isExplicitNull = lifecycle != null && lifecycle.isEnabled() == false; out.writeBoolean(isExplicitNull); if (isExplicitNull == false) { diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java index 9443a0fd951b8..dad68ad3605da 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java @@ -666,7 +666,7 @@ public String shortSummary() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(masterNodeId); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { out.writeVLong(nodeLeftGeneration); } // else nodeLeftGeneration is zero, or we're sending this to a remote cluster which does not care about the nodeLeftGeneration out.writeCollection(nodes.values()); @@ -681,7 +681,7 @@ public static DiscoveryNodes readFrom(StreamInput in, DiscoveryNode localNode) t builder.localNodeId(localNode.getId()); } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { builder.nodeLeftGeneration(in.readVLong()); } // else nodeLeftGeneration is zero, or we're receiving this from a remote cluster so the nodeLeftGeneration does not matter to us diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java index d98979c6d0611..279c774127e04 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import 
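Template above keeps a narrower fallback for mid-range peers: when the full lifecycle object cannot be sent, a single boolean encodes "lifecycle explicitly disabled" and everything else is dropped. A sketch of that three-tier write path under assumed version constants:

    import java.io.*;

    public class ExplicitNullSketch {
        static final int ADDED_ENABLED_FLAG_VERSION = 8_500_026; // illustrative stand-ins
        static final int V_8_500_020 = 8_500_020;

        record Lifecycle(boolean enabled) {}

        static void write(int peerVersion, Lifecycle lifecycle, DataOutput out) throws IOException {
            if (peerVersion >= ADDED_ENABLED_FLAG_VERSION) {
                out.writeBoolean(lifecycle != null); // full optional object for new peers
                if (lifecycle != null) {
                    out.writeBoolean(lifecycle.enabled());
                }
            } else if (peerVersion >= V_8_500_020) {
                boolean isExplicitNull = lifecycle != null && lifecycle.enabled() == false;
                out.writeBoolean(isExplicitNull); // mid-range peers only learn "explicitly disabled or not"
            } // even older peers: the field is omitted entirely
        }

        public static void main(String[] args) throws IOException {
            var bytes = new ByteArrayOutputStream();
            write(V_8_500_020, new Lifecycle(false), new DataOutputStream(bytes));
            System.out.println(bytes.size() + " byte(s) written: just the explicit-null flag"); // 1
        }
    }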
org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; @@ -31,11 +32,9 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.threadpool.ThreadPool; -import java.util.Collections; import java.util.Comparator; -import java.util.List; +import java.util.Iterator; import java.util.Set; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiFunction; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -179,7 +178,7 @@ private void failAllocationOfNewPrimaries(RoutingAllocation allocation) { while (unassignedIterator.hasNext()) { final ShardRouting shardRouting = unassignedIterator.next(); final UnassignedInfo unassignedInfo = shardRouting.unassignedInfo(); - if (shardRouting.primary() && unassignedInfo.getLastAllocationStatus() == UnassignedInfo.AllocationStatus.NO_ATTEMPT) { + if (shardRouting.primary() && unassignedInfo.getLastAllocationStatus() == AllocationStatus.NO_ATTEMPT) { unassignedIterator.updateUnassigned( new UnassignedInfo( unassignedInfo.getReason(), @@ -189,7 +188,7 @@ private void failAllocationOfNewPrimaries(RoutingAllocation allocation) { unassignedInfo.getUnassignedTimeInNanos(), unassignedInfo.getUnassignedTimeInMillis(), unassignedInfo.isDelayed(), - UnassignedInfo.AllocationStatus.DECIDERS_NO, + AllocationStatus.DECIDERS_NO, unassignedInfo.getFailedNodeIds(), unassignedInfo.getLastAllocatedNodeId() ), @@ -249,69 +248,60 @@ private void allocateUnassigned() { final var shard = primary[i]; final var assignment = desiredBalance.getAssignment(shard.shardId()); final boolean ignored = assignment == null || isIgnored(routingNodes, shard, assignment); - final var isThrottled = new AtomicBoolean(false); - if (ignored == false) { - for (final var nodeIdIterator : List.of( - getDesiredNodesIds(shard, assignment), - getFallbackNodeIds(shard, isThrottled) - )) { - for (final var desiredNodeId : nodeIdIterator) { - final var routingNode = routingNodes.node(desiredNodeId); - if (routingNode == null) { - // desired node no longer exists - continue; - } - final var decision = allocation.deciders().canAllocate(shard, routingNode, allocation); - switch (decision.type()) { - case YES -> { - logger.debug("Assigning shard [{}] to [{}]", shard, desiredNodeId); - final long shardSize = DiskThresholdDecider.getExpectedShardSize( - shard, - ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, - allocation.clusterInfo(), - allocation.snapshotShardSizeInfo(), - allocation.metadata(), - allocation.routingTable() - ); - routingNodes.initializeShard(shard, desiredNodeId, null, shardSize, allocation.changes()); - allocationOrdering.recordAllocation(desiredNodeId); - if (shard.primary() == false) { - // copy over the same replica shards to the secondary array so they will get allocated - // in a subsequent iteration, allowing replicas of other shards to be allocated first - while (i < primaryLength - 1 && comparator.compare(primary[i], primary[i + 1]) == 0) { - secondary[secondaryLength++] = primary[++i]; - } + AllocationStatus unallocatedStatus; + if (ignored) { + unallocatedStatus = AllocationStatus.NO_ATTEMPT; + } else { + unallocatedStatus = AllocationStatus.DECIDERS_NO; + final var nodeIdsIterator = new NodeIdsIterator(shard, assignment); + while (nodeIdsIterator.hasNext()) { + final var nodeId = nodeIdsIterator.next(); + final var routingNode = routingNodes.node(nodeId); + if (routingNode == null) { + // desired node no longer exists + continue; + } + final var decision = allocation.deciders().canAllocate(shard, 
routingNode, allocation); + switch (decision.type()) { + case YES -> { + logger.debug("Assigning shard [{}] to {} [{}]", shard, nodeIdsIterator.source, nodeId); + final long shardSize = DiskThresholdDecider.getExpectedShardSize( + shard, + ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, + allocation.clusterInfo(), + allocation.snapshotShardSizeInfo(), + allocation.metadata(), + allocation.routingTable() + ); + routingNodes.initializeShard(shard, nodeId, null, shardSize, allocation.changes()); + allocationOrdering.recordAllocation(nodeId); + if (shard.primary() == false) { + // copy over the same replica shards to the secondary array so they will get allocated + // in a subsequent iteration, allowing replicas of other shards to be allocated first + while (i < primaryLength - 1 && comparator.compare(primary[i], primary[i + 1]) == 0) { + secondary[secondaryLength++] = primary[++i]; } - continue nextShard; - } - case THROTTLE -> { - isThrottled.set(true); - logger.trace("Couldn't assign shard [{}] to [{}]: {}", shard.shardId(), desiredNodeId, decision); - } - case NO -> { - logger.trace("Couldn't assign shard [{}] to [{}]: {}", shard.shardId(), desiredNodeId, decision); } + continue nextShard; + } + case THROTTLE -> { + nodeIdsIterator.wasThrottled = true; + unallocatedStatus = AllocationStatus.DECIDERS_THROTTLED; + logger.trace("Couldn't assign shard [{}] to [{}]: {}", shard.shardId(), nodeId, decision); + } + case NO -> { + logger.trace("Couldn't assign shard [{}] to [{}]: {}", shard.shardId(), nodeId, decision); } } } } - logger.debug("No eligible node found to assign shard [{}] amongst [{}]", shard, assignment); - - final UnassignedInfo.AllocationStatus allocationStatus; - if (ignored) { - allocationStatus = UnassignedInfo.AllocationStatus.NO_ATTEMPT; - } else if (isThrottled.get()) { - allocationStatus = UnassignedInfo.AllocationStatus.DECIDERS_THROTTLED; - } else { - allocationStatus = UnassignedInfo.AllocationStatus.DECIDERS_NO; - } - - unassigned.ignoreShard(shard, allocationStatus, allocation.changes()); + logger.debug("No eligible node found to assign shard [{}]", shard); + unassigned.ignoreShard(shard, unallocatedStatus, allocation.changes()); if (shard.primary() == false) { // we could not allocate it and we are a replica - check if we can ignore the other replicas while (i < primaryLength - 1 && comparator.compare(primary[i], primary[i + 1]) == 0) { - unassigned.ignoreShard(primary[++i], allocationStatus, allocation.changes()); + unassigned.ignoreShard(primary[++i], unallocatedStatus, allocation.changes()); } } } @@ -323,23 +313,57 @@ private void allocateUnassigned() { } while (primaryLength > 0); } - private Iterable getDesiredNodesIds(ShardRouting shard, ShardAssignment assignment) { - return allocationOrdering.sort(allocation.deciders().getForcedInitialShardAllocationToNodes(shard, allocation).map(forced -> { - logger.debug("Shard [{}] assignment is ignored. Initial allocation forced to {}", shard.shardId(), forced); - return forced; - }).orElse(assignment.nodeIds())); - } + private final class NodeIdsIterator implements Iterator { + + private final ShardRouting shard; - private Iterable getFallbackNodeIds(ShardRouting shard, AtomicBoolean isThrottled) { - return () -> { - if (shard.primary() && isThrottled.get() == false) { + /** + * Contains the source of the nodeIds used for shard assignment. 
It could be: + * * desired - when using desired nodes + * * forced initial allocation - when initial allocation is forced to certain nodes by shrink/split/clone index operation + * * fallback - when assigning the primary shard is temporarily not possible on desired nodes, + * and it is assigned elsewhere in the cluster + */ + private NodeIdSource source; + private Iterator nodeIds; + + private boolean wasThrottled = false; + + NodeIdsIterator(ShardRouting shard, ShardAssignment assignment) { + this.shard = shard; + + var forcedInitialAllocation = allocation.deciders().getForcedInitialShardAllocationToNodes(shard, allocation); + if (forcedInitialAllocation.isPresent()) { + logger.debug("Shard [{}] initial allocation is forced to {}", shard.shardId(), forcedInitialAllocation.get()); + nodeIds = allocationOrdering.sort(forcedInitialAllocation.get()).iterator(); + source = NodeIdSource.FORCED_INITIAL_ALLOCATION; + } else { + nodeIds = allocationOrdering.sort(assignment.nodeIds()).iterator(); + source = NodeIdSource.DESIRED; + } + } + + @Override + public boolean hasNext() { + if (nodeIds.hasNext() == false && source == NodeIdSource.DESIRED && shard.primary() && wasThrottled == false) { var fallbackNodeIds = allocation.routingNodes().getAllNodeIds(); logger.debug("Shard [{}] assignment is temporarily not possible. Falling back to {}", shard.shardId(), fallbackNodeIds); - return allocationOrdering.sort(fallbackNodeIds).iterator(); - } else { - return Collections.emptyIterator(); + nodeIds = allocationOrdering.sort(fallbackNodeIds).iterator(); + source = NodeIdSource.FALLBACK; } - }; + return nodeIds.hasNext(); + } + + @Override + public String next() { + return nodeIds.next(); + } + } + + private enum NodeIdSource { + DESIRED, + FORCED_INITIAL_ALLOCATION, + FALLBACK; } private boolean isIgnored(RoutingNodes routingNodes, ShardRouting shard, ShardAssignment assignment) { diff --git a/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java b/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java index d22c22a22be10..7a83e26fcc8fb 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateTaskListener; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.Priority; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.SuppressForbidden; @@ -34,6 +35,7 @@ import java.util.HashSet; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; @@ -99,7 +101,10 @@ public ClusterState execute(BatchExecutionContext cont for (var c : context.taskContexts()) { for (var e : c.getTask().results().entrySet()) { // this node's transport version might have been updated already/node has gone away - TransportVersion recordedTv = builder.transportVersions().get(e.getKey()); + var cvMap = builder.compatibilityVersions(); + TransportVersion recordedTv = Optional.ofNullable(cvMap.get(e.getKey())) + .map(CompatibilityVersions::transportVersion) + .orElse(null); assert (recordedTv != null) || (context.initialState().nodes().nodeExists(e.getKey()) == false) : "Node " + e.getKey() + " is in the cluster 
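The NodeIdsIterator above folds the old two-list iteration (desired ids, then fallback ids) into one lazy iterator: the switch to the fallback set happens inside hasNext, and only for primaries that were never throttled on a desired node. A stand-alone sketch of the same state machine, with illustrative types:

    import java.util.*;

    public class NodeIdsIteratorSketch {
        enum Source { DESIRED, FALLBACK }

        static final class NodeIdsIterator implements Iterator<String> {
            private final boolean primary;
            private final List<String> allNodes;
            private Iterator<String> nodeIds;
            private Source source = Source.DESIRED;
            boolean wasThrottled = false;

            NodeIdsIterator(List<String> desired, List<String> allNodes, boolean primary) {
                this.nodeIds = desired.iterator();
                this.allNodes = allNodes;
                this.primary = primary;
            }

            @Override
            public boolean hasNext() {
                if (nodeIds.hasNext() == false && source == Source.DESIRED && primary && wasThrottled == false) {
                    nodeIds = allNodes.iterator(); // switch lazily to the fallback set
                    source = Source.FALLBACK;
                }
                return nodeIds.hasNext();
            }

            @Override
            public String next() {
                return nodeIds.next();
            }
        }

        public static void main(String[] args) {
            var it = new NodeIdsIterator(List.of("n1"), List.of("n1", "n2", "n3"), true);
            while (it.hasNext()) System.out.println(it.next()); // n1, then fallback: n1 n2 n3
        }
    }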
but does not have an associated transport version recorded"; if (Objects.equals(recordedTv, INFERRED_TRANSPORT_VERSION)) { @@ -113,9 +118,9 @@ public ClusterState execute(BatchExecutionContext cont } } - @SuppressForbidden(reason = "maintaining ClusterState#transportVersions requires reading them") - private static Map getTransportVersions(ClusterState clusterState) { - return clusterState.transportVersions(); + @SuppressForbidden(reason = "maintaining ClusterState#compatibilityVersions requires reading them") + private static Map getCompatibilityVersions(ClusterState clusterState) { + return clusterState.compatibilityVersions(); } @Override @@ -129,9 +134,9 @@ public void clusterChanged(ClusterChangedEvent event) { && event.state().getMinTransportVersion().equals(INFERRED_TRANSPORT_VERSION)) { // find all the relevant nodes - Set nodes = getTransportVersions(event.state()).entrySet() + Set nodes = getCompatibilityVersions(event.state()).entrySet() .stream() - .filter(e -> e.getValue().equals(INFERRED_TRANSPORT_VERSION)) + .filter(e -> e.getValue().transportVersion().equals(INFERRED_TRANSPORT_VERSION)) .map(Map.Entry::getKey) .collect(Collectors.toSet()); diff --git a/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java b/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java new file mode 100644 index 0000000000000..9463b9db08e3f --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.version; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xcontent.ToXContentFragment; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Comparator; +import java.util.Map; + +/** + * Wraps component version numbers for cluster state + * + *
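TransportVersionsFixupListener now has to unwrap CompatibilityVersions before comparing against the inferred placeholder, and a node that has already left must yield null rather than throw. Both lookups in isolation (the placeholder value is illustrative):

    import java.util.*;
    import java.util.stream.Collectors;

    public class InferredVersionsSketch {
        record CompatibilityVersions(Integer transportVersion) {}

        static final Integer INFERRED = 8_08_00; // hypothetical placeholder value

        public static void main(String[] args) {
            Map<String, CompatibilityVersions> byNode = Map.of(
                "n1", new CompatibilityVersions(INFERRED),
                "n2", new CompatibilityVersions(8_500_020)
            );

            // null-safe unwrap for a node that may have gone away:
            Integer recorded = Optional.ofNullable(byNode.get("gone"))
                .map(CompatibilityVersions::transportVersion)
                .orElse(null);
            System.out.println(recorded); // null

            // nodes whose version is still only inferred and needs fixing up:
            Set<String> toFix = byNode.entrySet().stream()
                .filter(e -> e.getValue().transportVersion().equals(INFERRED))
                .map(Map.Entry::getKey)
                .collect(Collectors.toSet());
            System.out.println(toFix); // [n1]
        }
    }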
* <p>Cluster state will need to carry version information for different independently versioned components. + * This wrapper lets us wrap these versions one level below {@link org.elasticsearch.cluster.ClusterState}. + * It's similar to {@link org.elasticsearch.cluster.node.VersionInformation}, but this class is meant to + * be constructed during node startup and hold values from plugins as well. + * + * @param transportVersion A transport version, usually a minimum compatible one for a node. + */ +public record CompatibilityVersions(TransportVersion transportVersion) implements Writeable, ToXContentFragment { + + /** + * Constructs a {@link CompatibilityVersions} collecting all the minimum versions from the values of the map. + * + * @param compatibilityVersions A map of strings (typically node identifiers) and version wrappers + * @return Minimum versions for the cluster + */ + public static CompatibilityVersions minimumVersions(Map<String, CompatibilityVersions> compatibilityVersions) { + return new CompatibilityVersions( + compatibilityVersions.values() + .stream() + .map(CompatibilityVersions::transportVersion) + .min(Comparator.naturalOrder()) + // In practice transportVersions is always nonempty (except in tests) but use a conservative default anyway: + .orElse(TransportVersion.MINIMUM_COMPATIBLE) + ); + } + + public static CompatibilityVersions readVersion(StreamInput in) throws IOException { + return new CompatibilityVersions(TransportVersion.readVersion(in)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + TransportVersion.writeVersion(this.transportVersion(), out); + } + + /** + * Adds fields to the builder without starting an object. We expect this method to be called within an object that may + * already have a nodeId field. + * @param builder The builder for the XContent + * @param params Ignored here. + * @return The builder with fields for versions added + * @throws IOException if the builder can't accept what we try to add + */ + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("transport_version", this.transportVersion().toString()); + return builder; + } +} diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/RunOnce.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/RunOnce.java index e14f2d6463fa2..a43fc04cb0460 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/RunOnce.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/RunOnce.java @@ -7,23 +7,35 @@ */ package org.elasticsearch.common.util.concurrent; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.VarHandle; import java.util.Objects; -import java.util.concurrent.atomic.AtomicReference; /** * Runnable that prevents running its delegate more than once.
*/ public class RunOnce implements Runnable { - private final AtomicReference delegateRef; + private static final VarHandle VH_DELEGATE_FIELD; + + static { + try { + VH_DELEGATE_FIELD = MethodHandles.lookup().in(RunOnce.class).findVarHandle(RunOnce.class, "delegate", Runnable.class); + } catch (NoSuchFieldException | IllegalAccessException e) { + throw new RuntimeException(e); + } + } + + @SuppressWarnings("FieldMayBeFinal") // updated via VH_DELEGATE_FIELD (and _only_ via VH_DELEGATE_FIELD) + private volatile Runnable delegate; public RunOnce(final Runnable delegate) { - delegateRef = new AtomicReference<>(Objects.requireNonNull(delegate)); + this.delegate = Objects.requireNonNull(delegate); } @Override public void run() { - var acquired = delegateRef.getAndSet(null); + var acquired = (Runnable) VH_DELEGATE_FIELD.compareAndExchange(this, delegate, null); if (acquired != null) { acquired.run(); } @@ -33,11 +45,11 @@ public void run() { * {@code true} if the {@link RunOnce} has been executed once. */ public boolean hasRun() { - return delegateRef.get() == null; + return delegate == null; } @Override public String toString() { - return "RunOnce[" + delegateRef.get() + "]"; + return "RunOnce[" + delegate + "]"; } } diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersion.java b/server/src/main/java/org/elasticsearch/index/IndexVersion.java index b9ba9f95d265a..34f415e46462a 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersion.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersion.java @@ -122,7 +122,7 @@ private static IndexVersion registerIndexVersion(int id, Version luceneVersion, * Detached index versions added below here. */ public static final IndexVersion V_8_500_000 = registerIndexVersion(8_500_000, Version.LUCENE_9_7_0, "bf656f5e-5808-4eee-bf8a-e2bf6736ff55"); - + public static final IndexVersion V_8_500_001 = registerIndexVersion(8_500_001, Version.LUCENE_9_7_0, "45045a5a-fc57-4462-89f6-6bc04cda6015"); /* * STOP! READ THIS FIRST! No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ @@ -142,10 +142,15 @@ private static IndexVersion registerIndexVersion(int id, Version luceneVersion, * * If you revert a commit with an index version change, you MUST ensure there is a NEW index version representing the reverted * change. DO NOT let the index version go backwards, it must ALWAYS be incremented. + * + * DETERMINING TRANSPORT VERSIONS FROM GIT HISTORY + * + * TODO after the release of v8.11.0, copy the instructions about using git to track the history of versions from TransportVersion.java + * (the example commands won't make sense until at least 8.11.0 is released) */ private static class CurrentHolder { - private static final IndexVersion CURRENT = findCurrent(V_8_500_000); + private static final IndexVersion CURRENT = findCurrent(V_8_500_001); // finds the pluggable current version, or uses the given fallback private static IndexVersion findCurrent(IndexVersion fallback) { diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 18317fd0bed7a..7f896c352d958 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -1126,7 +1126,7 @@ public void externalRefresh(String source, ActionListener * Called when our engine is using too much heap and should move buffered indexed/deleted documents to disk. 
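The RunOnce rewrite above trades the AtomicReference for a VarHandle over a volatile field, saving one object per instance; compareAndExchange both claims and clears the delegate in a single atomic step. The same pattern as a compilable stand-alone class (the diff additionally narrows the lookup with .in(RunOnce.class)):

    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;
    import java.util.Objects;

    public class RunOnceSketch implements Runnable {
        private static final VarHandle DELEGATE;

        static {
            try {
                DELEGATE = MethodHandles.lookup().findVarHandle(RunOnceSketch.class, "delegate", Runnable.class);
            } catch (NoSuchFieldException | IllegalAccessException e) {
                throw new ExceptionInInitializerError(e);
            }
        }

        // Written only through DELEGATE after construction.
        private volatile Runnable delegate;

        public RunOnceSketch(Runnable delegate) {
            this.delegate = Objects.requireNonNull(delegate);
        }

        @Override
        public void run() {
            // Read the current delegate once; only the thread whose CAS succeeds runs it.
            var acquired = (Runnable) DELEGATE.compareAndExchange(this, delegate, null);
            if (acquired != null) {
                acquired.run();
            }
        }

        public static void main(String[] args) {
            var once = new RunOnceSketch(() -> System.out.println("ran"));
            once.run(); // prints "ran"
            once.run(); // no-op
        }
    }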
*/ // NOTE: do NOT rename this to something containing flush or refresh! - public abstract void writeIndexingBuffer() throws EngineException; + public abstract void writeIndexingBuffer() throws IOException; /** * Checks if this engine should be flushed periodically. diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 363a71719efbe..2db91288ae174 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -125,6 +125,7 @@ import static org.elasticsearch.core.Strings.format; public class InternalEngine extends Engine { + /** * When we last pruned expired tombstones from versionMap.deletes: */ @@ -2035,9 +2036,28 @@ protected final RefreshResult refresh(String source, SearcherScope scope, boolea } @Override - public void writeIndexingBuffer() throws EngineException { + public void writeIndexingBuffer() throws IOException { + final long versionMapBytesUsed = versionMap.ramBytesUsedForRefresh(); + // Only count bytes that are not already being written to disk. Note: this number may be negative at times if these two metrics get + // updated concurrently. It's fine as it's only being used as a heuristic to decide on a full refresh vs. writing a single segment. + // TODO: it might be more relevant to use the RAM usage of the largest DWPT as opposed to the overall RAM usage? Can we get this + // exposed in Lucene? + final long indexWriterBytesUsed = indexWriter.ramBytesUsed() - indexWriter.getFlushingBytes(); + + if (versionMapBytesUsed >= indexWriterBytesUsed) { + // This method expects to reclaim memory quickly, so if the version map is using more memory than the IndexWriter buffer then we + // do a refresh, which is the only way to reclaim memory from the version map. IndexWriter#flushNextBuffer has similar logic: if + // pending deletes occupy more than half of RAMBufferSizeMB then deletes are applied too. + reclaimVersionMapMemory(); + } else { + // Write the largest pending segment. + indexWriter.flushNextBuffer(); + } + } + + private void reclaimVersionMapMemory() { // If we're already halfway through the flush thresholds, then we do a flush. This will save us from writing segments twice - // independently in a short period of time, once to reclaim IndexWriter buffer memory and then to reclaim the translog. For + // independently in a short period of time, once to reclaim version map memory and then to reclaim the translog. For // memory-constrained deployments that need to refresh often to reclaim memory, this may require flushing 2x more often than // expected, but the general assumption is that this downside is an ok trade-off given the benefit of flushing the whole content of // the indexing buffer less often. @@ -2048,12 +2068,9 @@ public void writeIndexingBuffer() throws EngineException { final long flushThresholdAgeInNanos = config().getIndexSettings().getFlushThresholdAge().getNanos() / 2; if (shouldPeriodicallyFlush(flushThresholdSizeInBytes, flushThresholdAgeInNanos)) { flush(false, false, ActionListener.noop()); - return; + } else { + refresh("write indexing buffer", SearcherScope.INTERNAL, false); } - - // TODO: revise https://github.com/elastic/elasticsearch/pull/34553 to use IndexWriter.flushNextBuffer to flush only the largest - // pending DWPT. Note that benchmarking this PR with a heavy update user case (geonames) and a small heap (1GB) caused OOM. 
- refresh("write indexing buffer", SearcherScope.INTERNAL, false); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java index 4fed853d945c9..86ab5b8edebe6 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java @@ -201,6 +201,10 @@ void remove(BytesRef uid, DeleteVersionValue deleted) { long getMinDeleteTimestamp() { return Math.min(current.minDeleteTimestamp.get(), old.minDeleteTimestamp.get()); } + + long ramBytesUsed() { + return current.ramBytesUsed.get() + old.ramBytesUsed.get(); + } } // All deletes also go here, and delete "tombstones" are retained after refresh: @@ -448,20 +452,20 @@ synchronized void clear() { @Override public long ramBytesUsed() { - return maps.current.ramBytesUsed.get() + ramBytesUsedTombstones.get(); + return maps.ramBytesUsed() + ramBytesUsedTombstones.get(); } /** - * Returns how much RAM would be freed up by refreshing. This is {@link #ramBytesUsed} except does not include tombstones because they - * don't clear on refresh. + * Returns how much RAM would be freed up by refreshing. This is the RAM usage of the current version map. It doesn't include tombstones + * since they don't get cleared on refresh, nor the old version map that is being reclaimed. */ long ramBytesUsedForRefresh() { return maps.current.ramBytesUsed.get(); } /** - * Returns how much RAM is current being freed up by refreshing. This is {@link #ramBytesUsed()} - * except does not include tombstones because they don't clear on refresh. + * Returns how much RAM is current being freed up by refreshing. This is the RAM usage of the previous version map that needs to stay + * around until operations are safely recorded in the Lucene index. 
*/ long getRefreshingBytes() { return maps.old.ramBytesUsed.get(); diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java index b38cc24e107ea..19345083bbc7b 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -440,7 +440,7 @@ public void maybeRefresh(String source, ActionListener listener) } @Override - public void writeIndexingBuffer() throws EngineException {} + public void writeIndexingBuffer() {} @Override public boolean shouldPeriodicallyFlush() { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java index ba45a700eebb5..082c2d898e637 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java @@ -8,41 +8,48 @@ package org.elasticsearch.index.mapper.vectors; +import org.apache.lucene.document.FeatureField; import org.apache.lucene.search.Query; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.logging.DeprecationCategory; -import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.analysis.NamedAnalyzer; +import org.elasticsearch.index.fielddata.FieldDataContext; +import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperBuilderContext; +import org.elasticsearch.index.mapper.SourceValueFetcher; import org.elasticsearch.index.mapper.TextSearchInfo; import org.elasticsearch.index.mapper.ValueFetcher; import org.elasticsearch.index.query.SearchExecutionContext; -import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.xcontent.XContentParser.Token; -import java.time.ZoneId; +import java.io.IOException; import java.util.Map; +import static org.elasticsearch.index.query.AbstractQueryBuilder.DEFAULT_BOOST; + /** - * A {@link FieldMapper} for indexing a sparse vector of floats. - * - * @deprecated The sparse_vector type was deprecated in 7.x and removed in 8.0. This mapper - * definition only exists so that 7.x indices can be read without error. - * - * TODO: remove in 9.0. + * A {@link FieldMapper} that exposes Lucene's {@link FeatureField} as a sparse + * vector of features. */ -@Deprecated public class SparseVectorFieldMapper extends FieldMapper { - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(SparseVectorFieldMapper.class); - static final String ERROR_MESSAGE = "The [sparse_vector] field type is no longer supported."; - static final String ERROR_MESSAGE_7X = "The [sparse_vector] field type is no longer supported. 
Old 7.x indices are allowed to " - + "contain [sparse_vector] fields, but they cannot be indexed or searched."; + public static final String CONTENT_TYPE = "sparse_vector"; + static final String ERROR_MESSAGE_7X = "[sparse_vector] field type in old 7.x indices is allowed to " + + "contain [sparse_vector] fields, but they cannot be indexed or searched."; + static final String ERROR_MESSAGE_8X = "The [sparse_vector] field type is not supported from 8.0 to 8.10 versions."; + static final IndexVersion PREVIOUS_SPARSE_VECTOR_INDEX_VERSION = IndexVersion.V_8_0_0; + + static final IndexVersion NEW_SPARSE_VECTOR_INDEX_VERSION = IndexVersion.V_8_500_001; + public static class Builder extends FieldMapper.Builder { - final Parameter> meta = Parameter.metaParam(); + private final Parameter> meta = Parameter.metaParam(); public Builder(String name) { super(name); @@ -65,18 +72,19 @@ public SparseVectorFieldMapper build(MapperBuilderContext context) { } public static final TypeParser PARSER = new TypeParser((n, c) -> { - if (c.indexVersionCreated().onOrAfter(IndexVersion.V_8_0_0)) { - throw new IllegalArgumentException(ERROR_MESSAGE); - } else { + if (c.indexVersionCreated().before(PREVIOUS_SPARSE_VECTOR_INDEX_VERSION)) { deprecationLogger.warn(DeprecationCategory.MAPPINGS, "sparse_vector", ERROR_MESSAGE_7X); - return new Builder(n); + } else if (c.indexVersionCreated().before(NEW_SPARSE_VECTOR_INDEX_VERSION)) { + throw new IllegalArgumentException(ERROR_MESSAGE_8X); } - }); + + return new Builder(n); + }, notInMultiFields(CONTENT_TYPE)); public static final class SparseVectorFieldType extends MappedFieldType { public SparseVectorFieldType(String name, Map meta) { - super(name, false, false, false, TextSearchInfo.NONE, meta); + super(name, true, false, false, TextSearchInfo.SIMPLE_MATCH_ONLY, meta); } @Override @@ -85,28 +93,45 @@ public String typeName() { } @Override - public DocValueFormat docValueFormat(String format, ZoneId timeZone) { - throw new UnsupportedOperationException(ERROR_MESSAGE_7X); + public Query existsQuery(SearchExecutionContext context) { + throw new IllegalArgumentException("[sparse_vector] fields do not support [exists] queries"); } @Override - public ValueFetcher valueFetcher(SearchExecutionContext context, String format) { - throw new UnsupportedOperationException(ERROR_MESSAGE_7X); + public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext) { + throw new IllegalArgumentException("[sparse_vector] fields do not support sorting, scripting or aggregating"); } @Override - public Query existsQuery(SearchExecutionContext context) { - throw new UnsupportedOperationException(ERROR_MESSAGE_7X); + public ValueFetcher valueFetcher(SearchExecutionContext context, String format) { + return SourceValueFetcher.identity(name(), context, format); } @Override public Query termQuery(Object value, SearchExecutionContext context) { - throw new UnsupportedOperationException(ERROR_MESSAGE_7X); + return FeatureField.newLinearQuery(name(), indexedValueForSearch(value), DEFAULT_BOOST); + } + + private static String indexedValueForSearch(Object value) { + if (value instanceof BytesRef) { + return ((BytesRef) value).utf8ToString(); + } + return value.toString(); } } private SparseVectorFieldMapper(String simpleName, MappedFieldType mappedFieldType, MultiFields multiFields, CopyTo copyTo) { - super(simpleName, mappedFieldType, multiFields, copyTo); + super(simpleName, mappedFieldType, multiFields, copyTo, false, null); + } + + @Override + public Map indexAnalyzers() { + return 
Map.of(mappedFieldType.name(), Lucene.KEYWORD_ANALYZER); + } + + @Override + public FieldMapper.Builder getMergeBuilder() { + return new Builder(simpleName()).init(this); } @Override @@ -115,13 +140,67 @@ public SparseVectorFieldType fieldType() { } @Override - public void parse(DocumentParserContext context) { - throw new UnsupportedOperationException(ERROR_MESSAGE_7X); + protected boolean supportsParsingObject() { + return true; + } + + @Override + public void parse(DocumentParserContext context) throws IOException { + + // No support for indexing / searching 7.x sparse_vector field types + if (context.indexSettings().getIndexVersionCreated().before(PREVIOUS_SPARSE_VECTOR_INDEX_VERSION)) { + throw new UnsupportedOperationException(ERROR_MESSAGE_7X); + } else if (context.indexSettings().getIndexVersionCreated().before(NEW_SPARSE_VECTOR_INDEX_VERSION)) { + throw new UnsupportedOperationException(ERROR_MESSAGE_8X); + } + + if (context.parser().currentToken() != Token.START_OBJECT) { + throw new IllegalArgumentException( + "[sparse_vector] fields must be json objects, expected a START_OBJECT but got: " + context.parser().currentToken() + ); + } + + String feature = null; + try { + // make sure that we don't expand dots in field names while parsing + context.path().setWithinLeafObject(true); + for (Token token = context.parser().nextToken(); token != Token.END_OBJECT; token = context.parser().nextToken()) { + if (token == Token.FIELD_NAME) { + feature = context.parser().currentName(); + if (feature.contains(".")) { + throw new IllegalArgumentException( + "[sparse_vector] fields do not support dots in feature names but found [" + feature + "]" + ); + } + } else if (token == Token.VALUE_NULL) { + // ignore feature, this is consistent with numeric fields + } else if (token == Token.VALUE_NUMBER || token == Token.VALUE_STRING) { + final String key = name() + "." 
+ feature; + float value = context.parser().floatValue(true); + if (context.doc().getByKey(key) != null) { + throw new IllegalArgumentException( + "[sparse_vector] fields do not support indexing multiple values for the same feature [" + + key + + "] in the same document" + ); + } + context.doc().addWithKey(key, new FeatureField(name(), feature, value)); + } else { + throw new IllegalArgumentException( + "[sparse_vector] fields take hashes that map a feature to a strictly positive " + + "float, but got unexpected token " + + token + ); + } + } + } finally { + context.path().setWithinLeafObject(false); + } } @Override protected void parseCreateField(DocumentParserContext context) { - throw new IllegalStateException("parse is implemented directly"); + throw new AssertionError("parse is implemented directly"); } @Override @@ -129,8 +208,4 @@ protected String contentType() { return CONTENT_TYPE; } - @Override - public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName()).init(this); - } } diff --git a/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java b/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java index 9f20b1d89fa1c..c0271ad30d720 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java +++ b/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java @@ -16,7 +16,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.engine.Engine; @@ -29,11 +28,15 @@ import java.io.Closeable; import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.Deque; import java.util.EnumSet; import java.util.HashSet; import java.util.List; -import java.util.PriorityQueue; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedDeque; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantLock; @@ -105,6 +108,9 @@ public class IndexingMemoryController implements IndexingOperationListener, Clos private final ShardsIndicesStatusChecker statusChecker; + private final Set pendingWriteIndexingBufferSet = Collections.newSetFromMap(new ConcurrentHashMap<>()); + private final Deque pendingWriteIndexingBufferQueue = new ConcurrentLinkedDeque<>(); + IndexingMemoryController(Settings settings, ThreadPool threadPool, Iterable indexServices) { this.indexShards = indexServices; @@ -183,19 +189,41 @@ protected long getShardWritingBytes(IndexShard shard) { return shard.getWritingBytes(); } - /** ask this shard to refresh, in the background, to free up heap */ - protected void writeIndexingBufferAsync(IndexShard shard) { - threadPool.executor(ThreadPool.Names.REFRESH).execute(new AbstractRunnable() { - @Override - public void doRun() { - shard.writeIndexingBuffer(); - } + /** Record that the given shard needs to write its indexing buffer. 
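// Aside: a reduced, standalone sketch of the sparse_vector parse loop above.
// SparseVectorParseSketch and the plain Map input are hypothetical stand-ins for
// DocumentParserContext, the XContentParser token loop and FeatureField; the
// validation mirrors the hunk: no dots in feature names, nulls ignored, and
// duplicate features for the same document rejected.
import java.util.HashMap;
import java.util.Map;

final class SparseVectorParseSketch {
    /** Flatten {"feature": weight, ...} into "field.feature" keys with validation. */
    static Map<String, Float> parse(String field, Map<String, Number> object) {
        Map<String, Float> doc = new HashMap<>();
        for (Map.Entry<String, Number> entry : object.entrySet()) {
            String feature = entry.getKey();
            if (feature.contains(".")) {
                throw new IllegalArgumentException("[sparse_vector] feature names must not contain dots: [" + feature + "]");
            }
            if (entry.getValue() == null) {
                continue; // null features are ignored, consistent with numeric fields
            }
            String key = field + "." + feature;
            if (doc.putIfAbsent(key, entry.getValue().floatValue()) != null) {
                throw new IllegalArgumentException("[sparse_vector] duplicate feature [" + key + "] in one document");
            }
        }
        return doc;
    }
}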
*/ + protected void enqueueWriteIndexingBuffer(IndexShard shard) { + if (pendingWriteIndexingBufferSet.add(shard)) { + pendingWriteIndexingBufferQueue.addLast(shard); + } + // Else there is already a queued task for the same shard and there is no evidence that adding another one is required since we'd + // need the first one to start running to know about the number of bytes still not being written. + } - @Override - public void onFailure(Exception e) { - logger.warn(() -> "failed to write indexing buffer for shard [" + shard.shardId() + "]; ignoring", e); - } - }); + /** + * Write pending indexing buffers. This should run on indexing threads in order to naturally apply back pressure on indexing. Lucene has + * similar logic in DocumentsWriter#postUpdate. + */ + private boolean writePendingIndexingBuffers() { + boolean wrotePendingIndexingBuffer = false; + for (IndexShard shard = pendingWriteIndexingBufferQueue.pollFirst(); shard != null; shard = pendingWriteIndexingBufferQueue + .pollFirst()) { + // Remove the shard from the set first, so that multiple threads can run writeIndexingBuffer concurrently on the same shard. + pendingWriteIndexingBufferSet.remove(shard); + shard.writeIndexingBuffer(); + wrotePendingIndexingBuffer = true; + } + return wrotePendingIndexingBuffer; + } + + private void writePendingIndexingBuffersAsync() { + for (IndexShard shard = pendingWriteIndexingBufferQueue.pollFirst(); shard != null; shard = pendingWriteIndexingBufferQueue + .pollFirst()) { + final IndexShard finalShard = shard; + threadPool.executor(ThreadPool.Names.REFRESH).execute(() -> { + // Remove the shard from the set first, so that multiple threads can run writeIndexingBuffer concurrently on the same shard. + pendingWriteIndexingBufferSet.remove(finalShard); + finalShard.writeIndexingBuffer(); + }); + } } /** force checker to run now */ @@ -215,12 +243,26 @@ protected void deactivateThrottling(IndexShard shard) { @Override public void postIndex(ShardId shardId, Engine.Index index, Engine.IndexResult result) { - recordOperationBytes(index, result); + postOperation(shardId, index, result); } @Override public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResult result) { - recordOperationBytes(delete, result); + postOperation(shardId, delete, result); + } + + private void postOperation(ShardId shardId, Engine.Operation operation, Engine.Result result) { + recordOperationBytes(operation, result); + // Piggy back on indexing threads to write segments. We're not submitting a task to the index threadpool because we want memory to + // be reclaimed rapidly. This has the downside of increasing the latency of _bulk requests though. Lucene does the same thing in + // DocumentsWriter#postUpdate, flushing a segment because the size limit on the RAM buffer was reached happens on the call to + // IndexWriter#addDocument. + while (writePendingIndexingBuffers()) { + // If we just wrote segments, then run the checker again if not already running to check if we released enough memory. 
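// Aside: the enqueue/drain pair above follows a general dedup-queue pattern. A
// minimal standalone sketch (DedupWorkQueue is a hypothetical name; the real code
// stores IndexShard and runs writeIndexingBuffer() as the worker):
import java.util.Collections;
import java.util.Deque;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.function.Consumer;

final class DedupWorkQueue<T> {
    private final Set<T> pending = Collections.newSetFromMap(new ConcurrentHashMap<>());
    private final Deque<T> queue = new ConcurrentLinkedDeque<>();

    /** Enqueue unless a task for this item is already pending. */
    void enqueue(T item) {
        if (pending.add(item)) {
            queue.addLast(item);
        }
    }

    /** Drain in FIFO order; returns true if any work ran. Removing from the set
     *  before working mirrors the hunk above, so a concurrent enqueue for the
     *  same item is accepted again right away. */
    boolean drain(Consumer<T> worker) {
        boolean ranAny = false;
        for (T item = queue.pollFirst(); item != null; item = queue.pollFirst()) {
            pending.remove(item);
            worker.accept(item);
            ranAny = true;
        }
        return ranAny;
    }
}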
+ if (statusChecker.tryRun() == false) { + break; + } + } } /** called by IndexShard to record estimated bytes written to translog for the operation */ @@ -230,7 +272,7 @@ private void recordOperationBytes(Engine.Operation operation, Engine.Result resu } } - private static final class ShardAndBytesUsed implements Comparable { + private static final class ShardAndBytesUsed { final long bytesUsed; final IndexShard shard; @@ -239,11 +281,6 @@ private static final class ShardAndBytesUsed implements Comparable= 0; - while (totalBytes > indexingBuffer.getBytes() / 30) { + while (totalBytes > indexingBuffer.getBytes() / 128) { if (runLock.tryLock()) { try { // Must pull this again because it may have changed since we first checked: totalBytes = bytesWrittenSinceCheck.get(); - if (totalBytes > indexingBuffer.getBytes() / 30) { + if (totalBytes > indexingBuffer.getBytes() / 128) { bytesWrittenSinceCheck.addAndGet(-totalBytes); // NOTE: this is only an approximate check, because bytes written is to the translog, // vs indexing memory buffer which is typically smaller but can be larger in extreme @@ -284,8 +323,24 @@ public void bytesWritten(int bytes) { } } + public boolean tryRun() { + if (runLock.tryLock()) { + try { + runUnlocked(); + } finally { + runLock.unlock(); + } + return true; + } else { + return false; + } + } + @Override public void run() { + // If there are any remainders from the previous check, schedule them now. Most of the time, indexing threads would have taken + // care of these indexing buffers before, and we wouldn't need to do it here. + writePendingIndexingBuffersAsync(); runLock.lock(); try { runUnlocked(); @@ -295,6 +350,7 @@ public void run() { } private void runUnlocked() { + assert runLock.isHeldByCurrentThread() : "ShardsIndicesStatusChecker#runUnlocked must always run under the run lock"; // NOTE: even if we hit an errant exc here, our ThreadPool.scheduledWithFixedDelay will log the exception and re-invoke us // again, on schedule @@ -341,7 +397,7 @@ private void runUnlocked() { if (totalBytesUsed > indexingBuffer.getBytes()) { // OK we are now over-budget; fill the priority queue and ask largest shard(s) to refresh: - PriorityQueue queue = new PriorityQueue<>(); + List queue = new ArrayList<>(); for (IndexShard shard : availableShards()) { // How many bytes this shard is currently (async'd) moving from heap to disk: @@ -386,21 +442,56 @@ private void runUnlocked() { queue.size() ); - while (totalBytesUsed > indexingBuffer.getBytes() && queue.isEmpty() == false) { - ShardAndBytesUsed largest = queue.poll(); + // What is the best order to go over shards and reclaim memory usage? Interestingly, picking random shards performs _much_ + // better than picking the largest shard when trying to optimize for the elastic/logs Rally track. One explanation for this + // is that Lucene's IndexWriter creates new pending segments in memory in order to satisfy indexing concurrency. E.g. if N + // indexing threads suddenly index into the same IndexWriter, then the IndexWriter will have N pending segments in memory. + // However, it's likely that indexing concurrency is not constant on a per-shard basis, especially when indexing into many + // shards concurrently. 
So there are chances that if we flush a single segment now, then it won't be re-created shortly + // because the peak indexing concurrency is rarely observed, and we end up indexing into fewer pending segments globally on + // average, which in-turn reduces the total number of segments that get produced, and also reduces merging. + // The downside of picking the shard that has the biggest indexing buffer is that it is often also the shard that has the + // highest ingestion rate, and thus it is also the shard that is the most likely to re-create a new pending segment in the + // very near future after one segment has been flushed. + + // We want to go over shards in a round-robin fashion across calls to #runUnlocked. First sort shards by something stable + // like the shard ID. + queue.sort(Comparator.comparing(shardAndBytes -> shardAndBytes.shard.shardId())); + if (lastShardId != null) { + // Then rotate the list so that the first shard that is greater than the ID of the last shard whose indexing buffer was + // written comes first. + int nextShardIdIndex = 0; + for (ShardAndBytesUsed shardAndBytes : queue) { + if (shardAndBytes.shard.shardId().compareTo(lastShardId) > 0) { + break; + } + nextShardIdIndex++; + } + Collections.rotate(queue, -nextShardIdIndex); + } + + for (ShardAndBytesUsed shardAndBytesUsed : queue) { logger.debug( "write indexing buffer to disk for shard [{}] to free up its [{}] indexing buffer", - largest.shard.shardId(), - ByteSizeValue.ofBytes(largest.bytesUsed) + shardAndBytesUsed.shard.shardId(), + ByteSizeValue.ofBytes(shardAndBytesUsed.bytesUsed) ); - writeIndexingBufferAsync(largest.shard); - totalBytesUsed -= largest.bytesUsed; - if (doThrottle && throttled.contains(largest.shard) == false) { - logger.info("now throttling indexing for shard [{}]: segment writing can't keep up", largest.shard.shardId()); - throttled.add(largest.shard); - activateThrottling(largest.shard); + enqueueWriteIndexingBuffer(shardAndBytesUsed.shard); + totalBytesUsed -= shardAndBytesUsed.bytesUsed; + lastShardId = shardAndBytesUsed.shard.shardId(); + if (doThrottle && throttled.contains(shardAndBytesUsed.shard) == false) { + logger.debug( + "now throttling indexing for shard [{}]: segment writing can't keep up", + shardAndBytesUsed.shard.shardId() + ); + throttled.add(shardAndBytesUsed.shard); + activateThrottling(shardAndBytesUsed.shard); + } + if (totalBytesUsed <= indexingBuffer.getBytes()) { + break; } } + } if (doThrottle == false) { diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesStats.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesStats.java index 271d872d76572..f1ef10c38372b 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesStats.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesStats.java @@ -28,7 +28,7 @@ public class RepositoriesStats implements Writeable, ToXContentFragment { private final Map repositoryThrottlingStats; public RepositoriesStats(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_011)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { repositoryThrottlingStats = in.readMap(ThrottlingStats::new); } else { repositoryThrottlingStats = new HashMap<>(); @@ -41,7 +41,7 @@ public RepositoriesStats(Map repositoryThrottlingStats) @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_011)) { + if 
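// Aside: a standalone sketch of the round-robin rotation used above, assuming a
// stable, comparable key (shard IDs in the real code). RoundRobinPicker and
// order() are hypothetical names; the real loop also stops once memory usage is
// back under budget and remembers the last shard it asked to write.
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

final class RoundRobinPicker {
    private Integer lastId; // last key handled on the previous run, null initially

    List<Integer> order(List<Integer> ids) {
        List<Integer> sorted = new ArrayList<>(ids);
        sorted.sort(Comparator.naturalOrder()); // stable order across runs
        if (lastId != null) {
            int next = 0;
            for (Integer id : sorted) {
                if (id.compareTo(lastId) > 0) {
                    break;
                }
                next++;
            }
            // Rotate so iteration resumes at the first key greater than lastId.
            Collections.rotate(sorted, -next);
        }
        if (sorted.isEmpty() == false) {
            lastId = sorted.get(sorted.size() - 1);
        }
        return sorted;
    }
}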
(out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { out.writeMap(repositoryThrottlingStats, StreamOutput::writeWriteable); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterByFilterAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterByFilterAggregator.java index dfc0b46dc0837..a2ce68e3fc29e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterByFilterAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterByFilterAggregator.java @@ -233,7 +233,7 @@ private FilterByFilterAggregator( @Override protected LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, LeafBucketCollector sub) throws IOException { assert scoreMode().needsScores() == false; - if (QueryToFilterAdapter.MatchesNoDocs(filters())) { + if (QueryToFilterAdapter.matchesNoDocs(filters())) { return LeafBucketCollector.NO_OP_COLLECTOR; } Bits live = aggCtx.getLeafReaderContext().reader().getLiveDocs(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java index 4cc3f976c6da2..e0792fca6c28f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java @@ -9,8 +9,13 @@ package org.elasticsearch.search.aggregations.bucket.filter; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.DisiPriorityQueue; +import org.apache.lucene.search.DisiWrapper; +import org.apache.lucene.search.DisjunctionDISIApproximation; +import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.Scorable; +import org.apache.lucene.search.Scorer; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -38,7 +43,6 @@ import java.util.Map; import java.util.Objects; import java.util.function.BiConsumer; -import java.util.function.IntPredicate; import java.util.function.LongPredicate; /** @@ -289,33 +293,210 @@ static class Compatible extends FiltersAggregator { @Override protected LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, LeafBucketCollector sub) throws IOException { - if (QueryToFilterAdapter.MatchesNoDocs(filters()) && otherBucketKey == null) { + if (QueryToFilterAdapter.matchesNoDocs(filters()) && otherBucketKey == null) { return LeafBucketCollector.NO_OP_COLLECTOR; } - IntPredicate[] docFilters = new IntPredicate[filters().size()]; - for (int filterOrd = 0; filterOrd < filters().size(); filterOrd++) { - docFilters[filterOrd] = filters().get(filterOrd).matchingDocIds(aggCtx.getLeafReaderContext()); + final int numFilters = filters().size(); + List filterWrappers = new ArrayList<>(); + long totalCost = 0; + for (int filterOrd = 0; filterOrd < numFilters; filterOrd++) { + Scorer randomAccessScorer = filters().get(filterOrd).randomAccessScorer(aggCtx.getLeafReaderContext()); + if (randomAccessScorer == null) { + continue; + } + totalCost += randomAccessScorer.iterator().cost(); + filterWrappers.add( + randomAccessScorer.twoPhaseIterator() == null + ? 
new FilterMatchingDisiWrapper(randomAccessScorer, filterOrd) + : new TwoPhaseFilterMatchingDisiWrapper(randomAccessScorer, filterOrd) + ); } - return new LeafBucketCollectorBase(sub, null) { - @Override - public void collect(int doc, long bucket) throws IOException { - boolean matched = false; - for (int i = 0; i < docFilters.length; i++) { - if (docFilters[i].test(doc)) { - collectBucket(sub, doc, bucketOrd(bucket, i)); + + // Restrict the use of competitive iterator when there's no parent agg, no 'other' bucket (all values are accessed then) + // and the cost of per-filter doc iterator is smaller than maxDoc, indicating that there are docs matching the main + // query but not the filter query. + final boolean hasOtherBucket = otherBucketKey != null; + final boolean usesCompetitiveIterator = (parent == null + && hasOtherBucket == false + && filterWrappers.isEmpty() == false + && totalCost < aggCtx.getLeafReaderContext().reader().maxDoc()); + + if (filterWrappers.size() == 1) { + return new SingleFilterLeafCollector( + sub, + filterWrappers.get(0), + numFilters, + totalNumKeys, + usesCompetitiveIterator, + hasOtherBucket + ); + } + return new MultiFilterLeafCollector(sub, filterWrappers, numFilters, totalNumKeys, usesCompetitiveIterator, hasOtherBucket); + } + } + + private abstract static class AbstractLeafCollector extends LeafBucketCollectorBase { + final LeafBucketCollector sub; + final int numFilters; + final int totalNumKeys; + final boolean usesCompetitiveIterator; + final boolean hasOtherBucket; + + AbstractLeafCollector( + LeafBucketCollector sub, + int numFilters, + int totalNumKeys, + boolean usesCompetitiveIterator, + boolean hasOtherBucket + ) { + super(sub, null); + this.sub = sub; + this.numFilters = numFilters; + this.totalNumKeys = totalNumKeys; + this.usesCompetitiveIterator = usesCompetitiveIterator; + this.hasOtherBucket = hasOtherBucket; + } + + final long bucketOrd(long owningBucketOrdinal, int filterOrd) { + return owningBucketOrdinal * totalNumKeys + filterOrd; + } + } + + private class SingleFilterLeafCollector extends AbstractLeafCollector { + + final FilterMatchingDisiWrapper filterWrapper; + + SingleFilterLeafCollector( + LeafBucketCollector sub, + FilterMatchingDisiWrapper filterWrapper, + int numFilters, + int totalNumKeys, + boolean usesCompetitiveIterator, + boolean hasOtherBucket + ) { + super(sub, numFilters, totalNumKeys, usesCompetitiveIterator, hasOtherBucket); + this.filterWrapper = filterWrapper; + } + + public void collect(int doc, long bucket) throws IOException { + if (filterWrapper.approximation.docID() < doc) { + filterWrapper.approximation.advance(doc); + } + boolean matched = false; + if (filterWrapper.approximation.docID() == doc) { + if (filterWrapper.checkDocForMatch(doc)) { + collectBucket(sub, doc, bucketOrd(bucket, filterWrapper.filterOrd)); + matched = true; + } + } + if (hasOtherBucket && false == matched) { + collectBucket(sub, doc, bucketOrd(bucket, numFilters)); + } + } + + @Override + public DocIdSetIterator competitiveIterator() throws IOException { + if (usesCompetitiveIterator) { + return filterWrapper.approximation; + } + return null; + } + } + + private class MultiFilterLeafCollector extends AbstractLeafCollector { + + // A DocIdSetIterator heap with one entry for each filter, ordered by doc ID + final DisiPriorityQueue filterIterators; + + MultiFilterLeafCollector( + LeafBucketCollector sub, + List filterWrappers, + int numFilters, + int totalNumKeys, + boolean usesCompetitiveIterator, + boolean hasOtherBucket + ) { + 
super(sub, numFilters, totalNumKeys, usesCompetitiveIterator, hasOtherBucket); + filterIterators = filterWrappers.isEmpty() ? null : new DisiPriorityQueue(filterWrappers.size()); + for (FilterMatchingDisiWrapper wrapper : filterWrappers) { + filterIterators.add(wrapper); + } + } + + public void collect(int doc, long bucket) throws IOException { + boolean matched = false; + if (filterIterators != null) { + // Advance filters if necessary. Filters will already be advanced if used as a competitive iterator. + DisiWrapper top = filterIterators.top(); + while (top.doc < doc) { + top.doc = top.approximation.advance(doc); + top = filterIterators.updateTop(); + } + + if (top.doc == doc) { + for (DisiWrapper w = filterIterators.topList(); w != null; w = w.next) { + // It would be nice if DisiPriorityQueue supported generics to avoid unchecked casts. + FilterMatchingDisiWrapper topMatch = (FilterMatchingDisiWrapper) w; + if (topMatch.checkDocForMatch(doc)) { + collectBucket(sub, doc, bucketOrd(bucket, topMatch.filterOrd)); matched = true; } } - if (otherBucketKey != null && false == matched) { - collectBucket(sub, doc, bucketOrd(bucket, docFilters.length)); - } } - }; + } + + if (hasOtherBucket && false == matched) { + collectBucket(sub, doc, bucketOrd(bucket, numFilters)); + } } - final long bucketOrd(long owningBucketOrdinal, int filterOrd) { - return owningBucketOrdinal * totalNumKeys + filterOrd; + @Override + public DocIdSetIterator competitiveIterator() throws IOException { + if (usesCompetitiveIterator) { + // A DocIdSetIterator view of the filterIterators heap + assert filterIterators != null; + return new DisjunctionDISIApproximation(filterIterators); + } + return null; + } + } + + private static class FilterMatchingDisiWrapper extends DisiWrapper { + final int filterOrd; + + FilterMatchingDisiWrapper(Scorer scorer, int ord) { + super(scorer); + this.filterOrd = ord; + } + + boolean checkDocForMatch(int doc) throws IOException { + return true; + } + } + + private static class TwoPhaseFilterMatchingDisiWrapper extends FilterMatchingDisiWrapper { + // Tracks the last doc that matches the filter. + int lastMatchingDoc = -1; + // Tracks the last doc that was checked for filter matching. + int lastCheckedDoc = -1; + + TwoPhaseFilterMatchingDisiWrapper(Scorer scorer, int ord) { + super(scorer, ord); + } + + @Override + boolean checkDocForMatch(int doc) throws IOException { + // We need to cache the result of twoPhaseView.matches() since it's illegal to call it multiple times on the + // same doc, yet LeafBucketCollector#collect may be called multiple times with the same doc and multiple + // buckets. 
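// Aside: the caching below is a general "memoize a single-shot check" pattern. A
// standalone sketch (TwoPhase and CachedMatch are hypothetical stand-ins for
// Lucene's TwoPhaseIterator and the wrapper in this hunk):
import java.io.IOException;

interface TwoPhase {
    boolean matches() throws IOException; // expensive and single-shot per doc
}

final class CachedMatch {
    private final TwoPhase twoPhase;
    private int lastCheckedDoc = -1;  // last doc we evaluated matches() for
    private int lastMatchingDoc = -1; // last doc that actually matched

    CachedMatch(TwoPhase twoPhase) {
        this.twoPhase = twoPhase;
    }

    /** Safe to call repeatedly with the same doc; matches() runs only once. */
    boolean check(int doc) throws IOException {
        if (lastCheckedDoc < doc) {
            lastCheckedDoc = doc;
            if (twoPhase.matches()) {
                lastMatchingDoc = doc;
            }
        }
        return lastMatchingDoc == doc;
    }
}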
+ if (lastCheckedDoc < doc) { + lastCheckedDoc = doc; + if (twoPhaseView.matches()) { + lastMatchingDoc = doc; + } + } + return (lastMatchingDoc == doc); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/QueryToFilterAdapter.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/QueryToFilterAdapter.java index 36afc481c1723..f7a613fbe142b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/QueryToFilterAdapter.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/QueryToFilterAdapter.java @@ -21,17 +21,17 @@ import org.apache.lucene.search.PointRangeQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.ScorerSupplier; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.List; import java.util.function.BiConsumer; -import java.util.function.IntPredicate; /** * Adapts a Lucene {@link Query} to the behaviors used be the @@ -171,16 +171,20 @@ private static Query unwrap(Query query) { } /** - * Build a predicate that the "compatible" implementation of the - * {@link FiltersAggregator} will use to figure out if the filter matches. - *
<p>
- * Consumers of this method will always call it with non-negative, - * increasing {@code int}s. A sequence like {@code 0, 1, 7, 8, 10} is fine. - * It won't call with {@code 0, 1, 0} or {@code -1, 0, 1}. + * Returns the {@link Scorer} that the "compatible" implementation of the {@link FiltersAggregator} will use + * to get an iterator over the docs matching the filter. The scorer is optimized for random access, since + * it will be skipping documents that don't match the main query or other filters. + * If the passed context contains no scorer, it returns a dummy scorer that matches no docs. */ - @SuppressWarnings("resource") // Closing the reader is someone else's problem - IntPredicate matchingDocIds(LeafReaderContext ctx) throws IOException { - return Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), weight().scorerSupplier(ctx))::get; + Scorer randomAccessScorer(LeafReaderContext ctx) throws IOException { + Weight weight = weight(); + ScorerSupplier scorerSupplier = weight.scorerSupplier(ctx); + if (scorerSupplier == null) { + return null; + } + + // A leading cost of 0 instructs the scorer to optimize for random access as opposed to sequential access + return scorerSupplier.get(0L); } /** @@ -255,7 +259,7 @@ private Weight weight() throws IOException { * @param filters list of filters to check * @return true if all filters match no docs, otherwise false */ - static boolean MatchesNoDocs(List filters) { + static boolean matchesNoDocs(List filters) { for (QueryToFilterAdapter filter : filters) { if (filter.query() instanceof MatchNoDocsQuery == false) { return false; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java index b30bf50e5a8ea..6f72e56306459 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java @@ -63,10 +63,8 @@ public MedianAbsoluteDeviationAggregationBuilder(String name) { public MedianAbsoluteDeviationAggregationBuilder(StreamInput in) throws IOException { super(in); compression = in.readDouble(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_018)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { executionHint = in.readOptionalWriteable(TDigestExecutionHint::readFrom); - } else if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_014)) { - executionHint = TDigestExecutionHint.readFrom(in); } else { executionHint = TDigestExecutionHint.HIGH_ACCURACY; } @@ -128,10 +126,8 @@ protected ValuesSourceType defaultValueSourceType() { @Override protected void innerWriteTo(StreamOutput out) throws IOException { out.writeDouble(compression); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_018)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { out.writeOptionalWriteable(executionHint); - } else if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_014)) { - (executionHint == null ? 
TDigestExecutionHint.DEFAULT : executionHint).writeTo(out); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesConfig.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesConfig.java index a9c567b9dd22d..aa2ffd9730306 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesConfig.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesConfig.java @@ -143,8 +143,8 @@ public TDigest(double compression, TDigestExecutionHint executionHint) { TDigest(StreamInput in) throws IOException { this( in.readDouble(), - in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_018) ? in.readOptionalWriteable(TDigestExecutionHint::readFrom) - : in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_014) ? TDigestExecutionHint.readFrom(in) + in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020) + ? in.readOptionalWriteable(TDigestExecutionHint::readFrom) : TDigestExecutionHint.HIGH_ACCURACY ); } @@ -248,10 +248,8 @@ public InternalNumericMetricsAggregation.MultiValue createEmptyPercentileRanksAg public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeDouble(compression); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_018)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { out.writeOptionalWriteable(executionHint); - } else if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_014)) { - (executionHint == null ? TDigestExecutionHint.DEFAULT : executionHint).writeTo(out); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestState.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestState.java index 2fbe2e679c1ab..f899585460a62 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestState.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestState.java @@ -111,7 +111,7 @@ public final double compression() { public static void write(TDigestState state, StreamOutput out) throws IOException { out.writeDouble(state.compression); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_014)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { out.writeString(state.type.toString()); out.writeVLong(state.tdigest.size()); } @@ -127,7 +127,7 @@ public static TDigestState read(StreamInput in) throws IOException { double compression = in.readDouble(); TDigestState state; long size = 0; - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_014)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { state = new TDigestState(Type.valueOf(in.readString()), compression); size = in.readVLong(); } else { diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 434dae7115812..6799a6a69179b 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -216,7 +216,7 @@ public SearchSourceBuilder(StreamInput in) throws IOException { indexBoosts = in.readCollectionAsList(IndexBoost::new); minScore = in.readOptionalFloat(); postQueryBuilder = in.readOptionalNamedWriteable(QueryBuilder.class); - if 
(in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_013)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { subSearchSourceBuilders = in.readCollectionAsList(SubSearchSourceBuilder::new); } else { QueryBuilder queryBuilder = in.readOptionalNamedWriteable(QueryBuilder.class); @@ -290,7 +290,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeCollection(indexBoosts); out.writeOptionalFloat(minScore); out.writeOptionalNamedWriteable(postQueryBuilder); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_013)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { out.writeCollection(subSearchSourceBuilders); } else if (out.getTransportVersion().before(TransportVersion.V_8_4_0) && subSearchSourceBuilders.size() >= 2) { throw new IllegalArgumentException("cannot serialize [sub_searches] to version [" + out.getTransportVersion() + "]"); diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java index 3c75c1e9ffebb..66d61068f745d 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java @@ -277,8 +277,8 @@ public ShardSearchRequest(StreamInput in) throws IOException { numberOfShards = in.readVInt(); scroll = in.readOptionalWriteable(Scroll::new); source = in.readOptionalWriteable(SearchSourceBuilder::new); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0) && in.getTransportVersion().before(TransportVersion.V_8_500_013)) { - // to deserialize between the 8.8 and 8.500.013 version we need to translate + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0) && in.getTransportVersion().before(TransportVersion.V_8_500_020)) { + // to deserialize between the 8.8 and 8.500.020 version we need to translate // the rank queries into sub searches if we are ranking; if there are no rank queries // we deserialize the empty list and do nothing List rankQueryBuilders = in.readNamedWriteableCollectionAsList(QueryBuilder.class); @@ -371,11 +371,11 @@ protected final void innerWriteTo(StreamOutput out, boolean asKey) throws IOExce out.writeOptionalWriteable(scroll); out.writeOptionalWriteable(source); if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0) - && out.getTransportVersion().before(TransportVersion.V_8_500_013)) { - // to serialize between the 8.8 and 8.500.013 version we need to translate + && out.getTransportVersion().before(TransportVersion.V_8_500_020)) { + // to serialize between the 8.8 and 8.500.020 version we need to translate // the sub searches into rank queries if we are ranking, otherwise, we // ignore this because linear combination will have multiple sub searches in - // 8.500.013+, but only use the combined boolean query in prior versions + // 8.500.020+, but only use the combined boolean query in prior versions List rankQueryBuilders = new ArrayList<>(); if (source != null && source.rankBuilder() != null && source.subSearches().size() >= 2) { for (SubSearchSourceBuilder subSearchSourceBuilder : source.subSearches()) { diff --git a/server/src/main/java/org/elasticsearch/threadpool/Scheduler.java b/server/src/main/java/org/elasticsearch/threadpool/Scheduler.java index 2592800016745..ad724aa190e18 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/Scheduler.java +++ 
b/server/src/main/java/org/elasticsearch/threadpool/Scheduler.java @@ -172,9 +172,10 @@ final class ReschedulingRunnable extends AbstractRunnable implements Cancellable private final Runnable runnable; private final TimeValue interval; + private final Executor executor; + private final Scheduler scheduler; private final Consumer rejectionConsumer; private final Consumer failureConsumer; - private final Runnable doSchedule; private volatile boolean run = true; @@ -196,35 +197,17 @@ final class ReschedulingRunnable extends AbstractRunnable implements Cancellable ) { this.runnable = runnable; this.interval = interval; + this.executor = executor; + this.scheduler = scheduler; this.rejectionConsumer = rejectionConsumer; this.failureConsumer = failureConsumer; - this.doSchedule = () -> scheduler.schedule(this, interval, executor); - } - - /** - * @deprecated Use {@link #ReschedulingRunnable(Runnable, TimeValue, Executor, Scheduler, Consumer, Consumer)}} instead. - */ - @Deprecated(forRemoval = true) - ReschedulingRunnable( - Runnable runnable, - TimeValue interval, - String executor, - Scheduler scheduler, - Consumer rejectionConsumer, - Consumer failureConsumer - ) { - this.runnable = runnable; - this.interval = interval; - this.rejectionConsumer = rejectionConsumer; - this.failureConsumer = failureConsumer; - this.doSchedule = () -> scheduler.schedule(this, interval, executor); } /** * Schedules the first execution of this runnable */ void start() { - doSchedule.run(); + scheduler.schedule(this, interval, executor); } @Override @@ -263,7 +246,7 @@ public void onAfter() { // if this has not been cancelled reschedule it to run again if (run) { try { - doSchedule.run(); + scheduler.schedule(this, interval, executor); } catch (final EsRejectedExecutionException e) { onRejection(e); } diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java index a8a4925441709..df0bb0174e542 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java @@ -21,10 +21,8 @@ import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; -import java.util.stream.Stream; /** * Base class for all services and components that need up-to-date information about the registered remote clusters @@ -149,7 +147,7 @@ void validateAndUpdateRemoteCluster(String clusterAlias, Settings settings) { * Registers this instance to listen to updates on the cluster settings. 
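// Aside: a standalone sketch of the self-rescheduling pattern that this
// refactoring simplifies, on a plain ScheduledExecutorService. Rescheduling is a
// hypothetical name; the real class additionally routes rejections and failures
// to dedicated consumers.
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

final class Rescheduling implements Runnable {
    private final Runnable task;
    private final long intervalMillis;
    private final ScheduledExecutorService scheduler;
    private volatile boolean run = true;

    Rescheduling(Runnable task, long intervalMillis, ScheduledExecutorService scheduler) {
        this.task = task;
        this.intervalMillis = intervalMillis;
        this.scheduler = scheduler;
    }

    void start() {
        scheduler.schedule(this, intervalMillis, TimeUnit.MILLISECONDS);
    }

    void cancel() {
        run = false;
    }

    @Override
    public void run() {
        try {
            task.run();
        } finally {
            if (run) { // fixed delay: the next run is scheduled only after this one finished
                scheduler.schedule(this, intervalMillis, TimeUnit.MILLISECONDS);
            }
        }
    }
}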
*/ public void listenForUpdates(ClusterSettings clusterSettings) { - List> remoteClusterSettings = Stream.of( + List> remoteClusterSettings = List.of( RemoteClusterService.REMOTE_CLUSTER_COMPRESS, RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE, RemoteConnectionStrategy.REMOTE_CONNECTION_MODE, @@ -159,7 +157,7 @@ public void listenForUpdates(ClusterSettings clusterSettings) { ProxyConnectionStrategy.PROXY_ADDRESS, ProxyConnectionStrategy.REMOTE_SOCKET_CONNECTIONS, ProxyConnectionStrategy.SERVER_NAME - ).filter(Objects::nonNull).collect(Collectors.toList()); + ); clusterSettings.addAffixGroupUpdateConsumer(remoteClusterSettings, this::validateAndUpdateRemoteCluster); } diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index 4a3ea5b61e51c..9542d4b366ded 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -309,13 +309,13 @@ protected void updateRemoteCluster(String clusterAlias, Settings settings) { CountDownLatch latch = new CountDownLatch(1); updateRemoteCluster(clusterAlias, settings, ActionListener.runAfter(new ActionListener<>() { @Override - public void onResponse(Void o) { - logger.debug("connected to new remote cluster [{}]", clusterAlias); + public void onResponse(RemoteClusterConnectionStatus status) { + logger.info("remote cluster connection [{}] updated: {}", clusterAlias, status); } @Override public void onFailure(Exception e) { - logger.debug(() -> "connection to new remote cluster [" + clusterAlias + "] failed", e); + logger.warn(() -> "failed to update remote cluster connection [" + clusterAlias + "]", e); } }, latch::countDown)); @@ -324,7 +324,7 @@ public void onFailure(Exception e) { // are on the cluster state thread and our custom future implementation will throw an // assertion. 
if (latch.await(10, TimeUnit.SECONDS) == false) { - logger.warn("failed to connect to new remote cluster [{}] within {}", clusterAlias, TimeValue.timeValueSeconds(10)); + logger.warn("failed to update remote cluster connection [{}] within {}", clusterAlias, TimeValue.timeValueSeconds(10)); } } catch (InterruptedException e) { Thread.currentThread().interrupt(); @@ -338,7 +338,11 @@ public void onFailure(Exception e) { * @param newSettings the updated settings for the remote connection * @param listener a listener invoked once every configured cluster has been connected to */ - synchronized void updateRemoteCluster(String clusterAlias, Settings newSettings, ActionListener listener) { + synchronized void updateRemoteCluster( + String clusterAlias, + Settings newSettings, + ActionListener listener + ) { if (LOCAL_CLUSTER_GROUP_KEY.equals(clusterAlias)) { throw new IllegalArgumentException("remote clusters must not have the empty string as its key"); } @@ -351,7 +355,7 @@ synchronized void updateRemoteCluster(String clusterAlias, Settings newSettings, logger.warn("failed to close remote cluster connections for cluster: " + clusterAlias, e); } remoteClusters.remove(clusterAlias); - listener.onResponse(null); + listener.onResponse(RemoteClusterConnectionStatus.DISCONNECTED); return; } @@ -365,7 +369,7 @@ synchronized void updateRemoteCluster(String clusterAlias, Settings newSettings, credentialsProtectedRemoteClusters.contains(clusterAlias) ); remoteClusters.put(clusterAlias, remote); - remote.ensureConnected(listener); + remote.ensureConnected(listener.map(ignored -> RemoteClusterConnectionStatus.CONNECTED)); } else if (remote.shouldRebuildConnection(newSettings)) { // Changes to connection configuration. Must tear down existing connection try { @@ -382,13 +386,20 @@ synchronized void updateRemoteCluster(String clusterAlias, Settings newSettings, credentialsProtectedRemoteClusters.contains(clusterAlias) ); remoteClusters.put(clusterAlias, remote); - remote.ensureConnected(listener); + remote.ensureConnected(listener.map(ignored -> RemoteClusterConnectionStatus.RECONNECTED)); } else { // No changes to connection configuration. - listener.onResponse(null); + listener.onResponse(RemoteClusterConnectionStatus.UNCHANGED); } } + enum RemoteClusterConnectionStatus { + CONNECTED, + DISCONNECTED, + RECONNECTED, + UNCHANGED + } + /** * Connects to all remote clusters in a blocking fashion. This should be called on node startup to establish an initial connection * to all configured seed nodes. 
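// Aside: the listener.map(...) calls above adapt a listener of one response type
// to an API that completes with another. A minimal sketch with a hypothetical
// Listener interface standing in for ActionListener:
import java.util.function.Function;

interface Listener<R> {
    void onResponse(R value);
    void onFailure(Exception e);

    /** View this Listener<R> as a Listener<T> by converting each response. */
    default <T> Listener<T> map(Function<? super T, ? extends R> fn) {
        Listener<R> outer = this;
        return new Listener<T>() {
            @Override
            public void onResponse(T value) {
                outer.onResponse(fn.apply(value));
            }

            @Override
            public void onFailure(Exception e) {
                outer.onFailure(e);
            }
        };
    }
}
// Usage mirroring the hunk: ensureConnected completes a Listener<Void>, so
// statusListener.map(ignored -> Status.CONNECTED) reports a status to the caller.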
@@ -404,7 +415,7 @@ void initializeRemoteClusters() { CountDownActionListener listener = new CountDownActionListener(enabledClusters.size(), future); for (String clusterAlias : enabledClusters) { - updateRemoteCluster(clusterAlias, settings, listener); + updateRemoteCluster(clusterAlias, settings, listener.map(ignored -> null)); } if (enabledClusters.isEmpty()) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java index e21b4adfe3928..2e3455b6e9b1f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java @@ -87,7 +87,7 @@ public void testToXContentWithDeprecatedClusterState() { assertXContent( createClusterRerouteResponse(clusterState), ToXContent.EMPTY_PARAMS, - 35, + 38, Strings.format( """ { @@ -132,6 +132,12 @@ public void testToXContentWithDeprecatedClusterState() { "transport_version": "8000099" } ], + "nodes_versions": [ + { + "node_id": "node0", + "transport_version": "8000099" + } + ], "metadata": { "cluster_uuid": "_na_", "cluster_uuid_committed": false, diff --git a/server/src/test/java/org/elasticsearch/action/search/OpenPointInTimeRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/OpenPointInTimeRequestTests.java index 6b7b1ab63653d..a2ca142f982ac 100644 --- a/server/src/test/java/org/elasticsearch/action/search/OpenPointInTimeRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/OpenPointInTimeRequestTests.java @@ -95,7 +95,7 @@ protected OpenPointInTimeRequest mutateInstance(OpenPointInTimeRequest in) throw } public void testUseDefaultConcurrentForOldVersion() throws Exception { - TransportVersion previousVersion = TransportVersionUtils.getPreviousVersion(TransportVersion.V_8_500_017); + TransportVersion previousVersion = TransportVersionUtils.getPreviousVersion(TransportVersion.V_8_500_020); try (BytesStreamOutput output = new BytesStreamOutput()) { TransportVersion version = TransportVersionUtils.randomVersionBetween(random(), TransportVersion.V_8_0_0, previousVersion); output.setTransportVersion(version); diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java index aa0c7ad6d499a..acdbcd9f512a8 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java @@ -153,7 +153,7 @@ public void testRandomVersionSerialization() throws IOException { // Versions before 8.8 don't support rank searchRequest.source().rankBuilder(null); } - if (version.before(TransportVersion.V_8_500_013) && searchRequest.source() != null) { + if (version.before(TransportVersion.V_8_500_020) && searchRequest.source() != null) { // Versions before 8_500_999 don't support queries searchRequest.source().subSearches(new ArrayList<>()); } diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java index aa7cb95c196ab..c453a15f7a16e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java @@ -136,7 +136,7 @@ public void 
testToXContent() throws IOException { clusterState, builder, new ToXContent.MapParams(singletonMap(Metadata.CONTEXT_MODE_PARAM, Metadata.CONTEXT_MODE_API)), - 41 + 44 ); builder.endObject(); @@ -212,6 +212,12 @@ public void testToXContent() throws IOException { "transport_version" : "%s" } ], + "nodes_versions" : [ + { + "node_id" : "nodeId1", + "transport_version" : "%s" + } + ], "metadata": { "cluster_uuid": "clusterUUID", "cluster_uuid_committed": false, @@ -365,6 +371,7 @@ public void testToXContent() throws IOException { IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.current(), TransportVersion.current(), + TransportVersion.current(), IndexVersion.current(), IndexVersion.current(), allocationId, @@ -393,7 +400,7 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); builder.startObject(); - writeChunks(clusterState, builder, new ToXContent.MapParams(mapParams), 41); + writeChunks(clusterState, builder, new ToXContent.MapParams(mapParams), 44); builder.endObject(); assertEquals( @@ -467,6 +474,12 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti "transport_version" : "%s" } ], + "nodes_versions" : [ + { + "node_id" : "nodeId1", + "transport_version" : "%s" + } + ], "metadata" : { "cluster_uuid" : "clusterUUID", "cluster_uuid_committed" : false, @@ -616,6 +629,7 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.current(), TransportVersion.current(), + TransportVersion.current(), IndexVersion.current(), IndexVersion.current(), allocationId, @@ -644,7 +658,7 @@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); builder.startObject(); - writeChunks(clusterState, builder, new ToXContent.MapParams(mapParams), 41); + writeChunks(clusterState, builder, new ToXContent.MapParams(mapParams), 44); builder.endObject(); assertEquals( @@ -718,6 +732,12 @@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti "transport_version" : "%s" } ], + "nodes_versions" : [ + { + "node_id" : "nodeId1", + "transport_version" : "%s" + } + ], "metadata" : { "cluster_uuid" : "clusterUUID", "cluster_uuid_committed" : false, @@ -873,6 +893,7 @@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.current(), TransportVersion.current(), + TransportVersion.current(), IndexVersion.current(), IndexVersion.current(), allocationId, @@ -919,7 +940,7 @@ public void testToXContentSameTypeName() throws IOException { XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); builder.startObject(); - writeChunks(clusterState, builder, ToXContent.EMPTY_PARAMS, 27); + writeChunks(clusterState, builder, ToXContent.EMPTY_PARAMS, 29); builder.endObject(); assertEquals(Strings.format(""" @@ -931,6 +952,7 @@ public void testToXContentSameTypeName() throws IOException { "blocks" : { }, "nodes" : { }, "transport_versions" : [ ], + "nodes_versions" : [ ], "metadata" : { "cluster_uuid" : "clusterUUID", "cluster_uuid_committed" : false, diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java index 038b27bb86bda..acdcf85d7cb38 100644 --- 
a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.routing.RerouteService; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterStateTaskExecutorUtils; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.Priority; import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.UUIDs; @@ -160,21 +161,24 @@ public void testPreventJoinClusterWithUnsupportedTransportVersion() { .mapToObj(i -> TransportVersionUtils.randomCompatibleVersion(random())) .toList(); TransportVersion min = Collections.min(versions); + List compatibilityVersions = versions.stream().map(CompatibilityVersions::new).toList(); // should not throw NodeJoinExecutor.ensureTransportVersionBarrier( - TransportVersionUtils.randomVersionBetween(random(), min, TransportVersion.current()), - versions + new CompatibilityVersions(TransportVersionUtils.randomVersionBetween(random(), min, TransportVersion.current())), + compatibilityVersions ); expectThrows( IllegalStateException.class, () -> NodeJoinExecutor.ensureTransportVersionBarrier( - TransportVersionUtils.randomVersionBetween( - random(), - TransportVersionUtils.getFirstVersion(), - TransportVersionUtils.getPreviousVersion(min) + new CompatibilityVersions( + TransportVersionUtils.randomVersionBetween( + random(), + TransportVersionUtils.getFirstVersion(), + TransportVersionUtils.getPreviousVersion(min) + ) ), - versions + compatibilityVersions ) ); } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeLeftExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeLeftExecutorTests.java index 8d794786881da..4f03cbe3a1fc0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeLeftExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeLeftExecutorTests.java @@ -9,7 +9,6 @@ package org.elasticsearch.cluster.coordination; import org.apache.logging.log4j.Level; -import org.elasticsearch.TransportVersion; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -18,6 +17,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterStateTaskExecutorUtils; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.Priority; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; @@ -72,9 +72,11 @@ public void testRerouteAfterRemovingNodes() throws Exception { protected ClusterState remainingNodesClusterState( ClusterState currentState, DiscoveryNodes.Builder remainingNodesBuilder, - Map transportVersions + Map compatibilityVersions ) { - remainingNodesClusterState.set(super.remainingNodesClusterState(currentState, remainingNodesBuilder, transportVersions)); + remainingNodesClusterState.set( + super.remainingNodesClusterState(currentState, remainingNodesBuilder, compatibilityVersions) + ); return remainingNodesClusterState.get(); } }; diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java 
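The join-barrier tests above now wrap each raw TransportVersion in CompatibilityVersions before calling ensureTransportVersionBarrier. A reduced sketch of that barrier check, with an illustrative record and a plain int version standing in for the real classes (both are assumptions, not the actual API):

import java.util.Collection;
import java.util.Comparator;

record Versions(int transportVersion) {}

final class JoinBarrier {
    /** Reject joins below the minimum version already present in the cluster. */
    static void ensureBarrier(Versions joining, Collection<Versions> cluster) {
        int min = cluster.stream()
            .min(Comparator.comparingInt(Versions::transportVersion))
            .map(Versions::transportVersion)
            .orElse(joining.transportVersion());
        if (joining.transportVersion() < min) {
            throw new IllegalStateException(
                "node with transport version [" + joining.transportVersion()
                    + "] may not join a cluster with minimum [" + min + "]"
            );
        }
    }
}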
index fe877bac66b7e..e8e04a1e7c97a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -2240,6 +2241,40 @@ public void testEnsureMetadataFieldCheckedForGlobalStateChanges() { assertThat(unclassifiedFields, empty()); } + public void testIsTimeSeriesTemplate() throws IOException { + var template = new Template(Settings.builder().put("index.mode", "time_series").build(), new CompressedXContent("{}"), null); + // Settings in component template: + { + var componentTemplate = new ComponentTemplate(template, null, null); + var indexTemplate = new ComposableIndexTemplate( + List.of("test-*"), + null, + List.of("component_template_1"), + null, + null, + null, + new ComposableIndexTemplate.DataStreamTemplate() + ); + Metadata m = Metadata.builder().put("component_template_1", componentTemplate).put("index_template_1", indexTemplate).build(); + assertThat(m.isTimeSeriesTemplate(indexTemplate), is(true)); + } + // Settings in composable index template: + { + var componentTemplate = new ComponentTemplate(new Template(null, null, null), null, null); + var indexTemplate = new ComposableIndexTemplate( + List.of("test-*"), + template, + List.of("component_template_1"), + null, + null, + null, + new ComposableIndexTemplate.DataStreamTemplate() + ); + Metadata m = Metadata.builder().put("component_template_1", componentTemplate).put("index_template_1", indexTemplate).build(); + assertThat(m.isTimeSeriesTemplate(indexTemplate), is(true)); + } + } + public static Metadata randomMetadata() { return randomMetadata(1); } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java index 6326a18515f0c..c8e6a011bc52e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java @@ -72,6 +72,8 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -1022,32 +1024,25 @@ public void testDoNotRebalanceToTheNodeThatNoLongerExists() { final var shardId = new ShardId(index, 0); final var clusterState = ClusterState.builder(ClusterName.DEFAULT) - .nodes( - DiscoveryNodes.builder() - // data-node-1 left the cluster - .localNodeId("data-node-2") - .masterNodeId("data-node-2") - .add(newNode("data-node-2")) - ) + .nodes(discoveryNodes(1))// node-1 left the cluster .metadata(Metadata.builder().put(indexMetadata, true)) .routingTable( - RoutingTable.builder() - .add(IndexRoutingTable.builder(index).addShard(newShardRouting(shardId, "data-node-2", true, STARTED))) + RoutingTable.builder().add(IndexRoutingTable.builder(index).addShard(newShardRouting(shardId, 
"node-0", true, STARTED))) ) .build(); final var allocation = createRoutingAllocationFrom(clusterState); final var balance = new DesiredBalance( 1, - Map.of(shardId, new ShardAssignment(Set.of("data-node-1"), 1, 0, 0)) // shard is assigned to the node that has left + Map.of(shardId, new ShardAssignment(Set.of("node-1"), 1, 0, 0)) // shard is assigned to the node that has left ); reconcile(allocation, balance); - assertThat(allocation.routingNodes().node("data-node-1"), nullValue()); - assertThat(allocation.routingNodes().node("data-node-2"), notNullValue()); + assertThat(allocation.routingNodes().node("node-0"), notNullValue()); + assertThat(allocation.routingNodes().node("node-1"), nullValue()); // shard is kept wherever until balance is recalculated - assertThat(allocation.routingNodes().node("data-node-2").getByShardId(shardId), notNullValue()); + assertThat(allocation.routingNodes().node("node-0").getByShardId(shardId), notNullValue()); } public void testDoNotAllocateIgnoredShards() { @@ -1057,7 +1052,7 @@ public void testDoNotAllocateIgnoredShards() { final var shardId = new ShardId(index, 0); final var clusterState = ClusterState.builder(ClusterName.DEFAULT) - .nodes(DiscoveryNodes.builder().localNodeId("node-1").masterNodeId("node-1").add(newNode("node-1"))) + .nodes(discoveryNodes(1)) .metadata(Metadata.builder().put(indexMetadata, true)) .routingTable(RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(indexMetadata)) .build(); @@ -1070,7 +1065,103 @@ public void testDoNotAllocateIgnoredShards() { reconcile(allocation, balance); + assertThat(allocation.routingNodes().node("node-0").size(), equalTo(0)); + assertThat(allocation.routingNodes().unassigned().ignored(), hasSize(1)); + } + + public void testFallbackAllocation() { + + final var indexMetadata = IndexMetadata.builder("index-1").settings(indexSettings(IndexVersion.current(), 1, 1)).build(); + final var index = indexMetadata.getIndex(); + final var shardId = new ShardId(index, 0); + + final var clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(discoveryNodes(4)) + .metadata(Metadata.builder().put(indexMetadata, true)) + .routingTable(RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(indexMetadata)) + .build(); + + final Set desiredNodeIds = Set.of("node-1", "node-2"); + final var initialForcedAllocationDecider = new AllocationDecider() { + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + // allocation on desired nodes is temporarily not possible + return desiredNodeIds.contains(node.nodeId()) ? 
Decision.NO : Decision.YES; + } + }; + + final var allocation = createRoutingAllocationFrom(clusterState, initialForcedAllocationDecider); + final var balance = new DesiredBalance(1, Map.of(shardId, new ShardAssignment(desiredNodeIds, 2, 0, 0))); + + reconcile(allocation, balance); + + // only primary is allocated to the fallback node, replica stays unassigned + assertThat(allocation.routingNodes().node("node-0").size() + allocation.routingNodes().node("node-1").size(), equalTo(0)); + assertThat(allocation.routingNodes().node("node-2").size() + allocation.routingNodes().node("node-3").size(), equalTo(1)); + assertThat(allocation.routingNodes().unassigned().ignored(), hasSize(1)); + } + + public void testForcedInitialAllocation() { + + final var indexMetadata = IndexMetadata.builder("index-1").settings(indexSettings(IndexVersion.current(), 1, 0)).build(); + final var index = indexMetadata.getIndex(); + final var shardId = new ShardId(index, 0); + + final var clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(discoveryNodes(2)) + .metadata(Metadata.builder().put(indexMetadata, true)) + .routingTable(RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(indexMetadata)) + .build(); + + final var allocationIsNotPossibleOnDesiredNodeDesiredNode = new AllocationDecider() { + @Override + public Optional<Set<String>> getForcedInitialShardAllocationToNodes(ShardRouting shardRouting, RoutingAllocation allocation) { + return Optional.of(Set.of("node-1"));// intentionally different from the desired balance + } + }; + + final var allocation = createRoutingAllocationFrom(clusterState, allocationIsNotPossibleOnDesiredNodeDesiredNode); + final var balance = new DesiredBalance(1, Map.of(shardId, new ShardAssignment(Set.of("node-0"), 1, 0, 0))); + + reconcile(allocation, balance); + + assertThat(allocation.routingNodes().node("node-0").size(), equalTo(0)); + assertThat(allocation.routingNodes().node("node-1").size(), equalTo(1)); + assertThat(allocation.routingNodes().unassigned().ignored(), hasSize(0)); + } + + public void testForcedInitialAllocationDoNotFallback() { + + final var indexMetadata = IndexMetadata.builder("index-1").settings(indexSettings(IndexVersion.current(), 1, 0)).build(); + final var index = indexMetadata.getIndex(); + final var shardId = new ShardId(index, 0); + + final var clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(discoveryNodes(3)) + .metadata(Metadata.builder().put(indexMetadata, true)) + .routingTable(RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(indexMetadata)) + .build(); + + final var initialForcedAllocationDecider = new AllocationDecider() { + @Override + public Optional<Set<String>> getForcedInitialShardAllocationToNodes(ShardRouting shardRouting, RoutingAllocation allocation) { + return Optional.of(Set.of("node-1"));// intentionally different from the desired balance + } + + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + return Objects.equals(node.nodeId(), "node-2") ? 
Decision.YES : Decision.NO; // can allocate only on fallback node + } + }; + + final var allocation = createRoutingAllocationFrom(clusterState, initialForcedAllocationDecider); + final var balance = new DesiredBalance(1, Map.of(shardId, new ShardAssignment(Set.of("node-0"), 1, 0, 0))); + + reconcile(allocation, balance); + + assertThat(allocation.routingNodes().node("node-0").size(), equalTo(0)); assertThat(allocation.routingNodes().node("node-1").size(), equalTo(0)); + assertThat(allocation.routingNodes().node("node-2").size(), equalTo(0)); assertThat(allocation.routingNodes().unassigned().ignored(), hasSize(1)); } diff --git a/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java b/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java index 43efc7da0df4c..421865e97002d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java @@ -21,7 +21,9 @@ import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.TransportVersionsFixupListener.NodeTransportVersionTask; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.Scheduler; import org.mockito.ArgumentCaptor; @@ -107,7 +109,7 @@ public void testNothingFixedWhenNothingToInfer() { ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) .nodes(node(Version.V_8_8_0)) - .transportVersions(versions(TransportVersion.V_8_8_0)) + .compatibilityVersions(versions(new CompatibilityVersions(TransportVersion.V_8_8_0))) .build(); TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null); @@ -122,7 +124,7 @@ public void testNothingFixedWhenOnNextVersion() { ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) .nodes(node(NEXT_VERSION)) - .transportVersions(versions(NEXT_TRANSPORT_VERSION)) + .compatibilityVersions(versions(new CompatibilityVersions(NEXT_TRANSPORT_VERSION))) .build(); TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null); @@ -137,7 +139,9 @@ public void testNothingFixedWhenOnPreviousVersion() { ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) .nodes(node(Version.V_8_7_0, Version.V_8_8_0)) - .transportVersions(versions(TransportVersion.V_8_7_0, TransportVersion.V_8_8_0)) + .compatibilityVersions( + Maps.transformValues(versions(TransportVersion.V_8_7_0, TransportVersion.V_8_8_0), CompatibilityVersions::new) + ) .build(); TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null); @@ -153,7 +157,12 @@ public void testVersionsAreFixed() { ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) .nodes(node(NEXT_VERSION, NEXT_VERSION, NEXT_VERSION)) - .transportVersions(versions(NEXT_TRANSPORT_VERSION, TransportVersion.V_8_8_0, TransportVersion.V_8_8_0)) + .compatibilityVersions( + Maps.transformValues( + versions(NEXT_TRANSPORT_VERSION, TransportVersion.V_8_8_0, TransportVersion.V_8_8_0), + CompatibilityVersions::new + ) + ) .build(); ArgumentCaptor> action = ArgumentCaptor.forClass(ActionListener.class); @@ -177,7 +186,12 
@@ public void testConcurrentChangesDoNotOverlap() { ClusterState testState1 = ClusterState.builder(ClusterState.EMPTY_STATE) .nodes(node(NEXT_VERSION, NEXT_VERSION, NEXT_VERSION)) - .transportVersions(versions(NEXT_TRANSPORT_VERSION, TransportVersion.V_8_8_0, TransportVersion.V_8_8_0)) + .compatibilityVersions( + Maps.transformValues( + versions(NEXT_TRANSPORT_VERSION, TransportVersion.V_8_8_0, TransportVersion.V_8_8_0), + CompatibilityVersions::new + ) + ) .build(); TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null); @@ -187,7 +201,12 @@ public void testConcurrentChangesDoNotOverlap() { ClusterState testState2 = ClusterState.builder(ClusterState.EMPTY_STATE) .nodes(node(NEXT_VERSION, NEXT_VERSION, NEXT_VERSION)) - .transportVersions(versions(NEXT_TRANSPORT_VERSION, NEXT_TRANSPORT_VERSION, TransportVersion.V_8_8_0)) + .compatibilityVersions( + Maps.transformValues( + versions(NEXT_TRANSPORT_VERSION, NEXT_TRANSPORT_VERSION, TransportVersion.V_8_8_0), + CompatibilityVersions::new + ) + ) .build(); // should not send any requests listeners.clusterChanged(new ClusterChangedEvent("test", testState2, testState1)); @@ -202,7 +221,12 @@ public void testFailedRequestsAreRetried() { ClusterState testState1 = ClusterState.builder(ClusterState.EMPTY_STATE) .nodes(node(NEXT_VERSION, NEXT_VERSION, NEXT_VERSION)) - .transportVersions(versions(NEXT_TRANSPORT_VERSION, TransportVersion.V_8_8_0, TransportVersion.V_8_8_0)) + .compatibilityVersions( + Maps.transformValues( + versions(NEXT_TRANSPORT_VERSION, TransportVersion.V_8_8_0, TransportVersion.V_8_8_0), + CompatibilityVersions::new + ) + ) .build(); ArgumentCaptor> action = ArgumentCaptor.forClass(ActionListener.class); diff --git a/server/src/test/java/org/elasticsearch/cluster/version/CompatibilityVersionsTests.java b/server/src/test/java/org/elasticsearch/cluster/version/CompatibilityVersionsTests.java new file mode 100644 index 0000000000000..9d3726c1935d5 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/version/CompatibilityVersionsTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.cluster.version; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.TransportVersionUtils; + +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class CompatibilityVersionsTests extends ESTestCase { + + public void testMinimumVersions() { + assertThat( + CompatibilityVersions.minimumVersions(Map.of()), + equalTo(new CompatibilityVersions(TransportVersion.MINIMUM_COMPATIBLE)) + ); + + TransportVersion version1 = TransportVersionUtils.getNextVersion(TransportVersion.MINIMUM_COMPATIBLE, true); + TransportVersion version2 = TransportVersionUtils.randomVersionBetween( + random(), + TransportVersionUtils.getNextVersion(version1, true), + TransportVersion.current() + ); + + CompatibilityVersions compatibilityVersions1 = new CompatibilityVersions(version1); + CompatibilityVersions compatibilityVersions2 = new CompatibilityVersions(version2); + + Map versionsMap = Map.of("node1", compatibilityVersions1, "node2", compatibilityVersions2); + + assertThat(CompatibilityVersions.minimumVersions(versionsMap), equalTo(compatibilityVersions1)); + } +} diff --git a/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java b/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java index 34681d858cf67..7122c1465a27d 100644 --- a/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java +++ b/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java @@ -27,6 +27,7 @@ import org.apache.lucene.store.ByteBuffersDirectory; import org.apache.lucene.store.Directory; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.core.IOUtils; import org.elasticsearch.lucene.search.vectorhighlight.CustomFieldQuery; import org.elasticsearch.test.ESTestCase; @@ -64,6 +65,7 @@ public void testVectorHighlighter() throws Exception { ); assertThat(fragment, notNullValue()); assertThat(fragment, equalTo("the big bad dog")); + IOUtils.close(reader, indexWriter, dir); } public void testVectorHighlighterPrefixQuery() throws Exception { @@ -120,6 +122,7 @@ public void testVectorHighlighterPrefixQuery() throws Exception { 30 ); assertThat(fragment, notNullValue()); + IOUtils.close(indexReader, indexWriter, dir); } public void testVectorHighlighterNoStore() throws Exception { @@ -150,6 +153,7 @@ public void testVectorHighlighterNoStore() throws Exception { 30 ); assertThat(fragment, nullValue()); + IOUtils.close(reader, indexWriter, dir); } public void testVectorHighlighterNoTermVector() throws Exception { @@ -176,5 +180,6 @@ public void testVectorHighlighterNoTermVector() throws Exception { 30 ); assertThat(fragment, nullValue()); + IOUtils.close(reader, indexWriter, dir); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapperTests.java index 9ad906c31c74a..9ea63325ef3ad 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapperTests.java @@ -8,93 +8,218 @@ package org.elasticsearch.index.mapper.vectors; -import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.tokenattributes.TermFrequencyAttribute; +import org.apache.lucene.document.FeatureField; 
+import org.apache.lucene.index.IndexableField; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentParsingException; import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.MapperBuilderContext; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.MapperTestCase; +import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceToParse; -import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.index.IndexVersionUtils; +import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; +import org.hamcrest.Matchers; +import org.junit.AssumptionViolatedException; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.index.mapper.vectors.SparseVectorFieldMapper.NEW_SPARSE_VECTOR_INDEX_VERSION; +import static org.elasticsearch.index.mapper.vectors.SparseVectorFieldMapper.PREVIOUS_SPARSE_VECTOR_INDEX_VERSION; import static org.hamcrest.Matchers.containsString; -public class SparseVectorFieldMapperTests extends ESSingleNodeTestCase { +public class SparseVectorFieldMapperTests extends MapperTestCase { + + @Override + protected Object getSampleValueForDocument() { + return Map.of("ten", 10, "twenty", 20); + } + + @Override + protected Object getSampleObjectForDocument() { + return getSampleValueForDocument(); + } + + @Override + protected void assertExistsQuery(MapperService mapperService) { + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> super.assertExistsQuery(mapperService)); + assertEquals("[sparse_vector] fields do not support [exists] queries", iae.getMessage()); + } - // this allows to set indexVersion as it is a private setting @Override - protected boolean forbidPrivateIndexSettings() { + protected void minimalMapping(XContentBuilder b) throws IOException { + b.field("type", "sparse_vector"); + } + + @Override + protected boolean supportsStoredFields() { return false; } - public void testValueFetcherIsNotSupported() { - SparseVectorFieldMapper.Builder builder = new SparseVectorFieldMapper.Builder("field"); - MappedFieldType fieldMapper = builder.build(MapperBuilderContext.root(false)).fieldType(); - UnsupportedOperationException exc = expectThrows(UnsupportedOperationException.class, () -> fieldMapper.valueFetcher(null, null)); - assertEquals(SparseVectorFieldMapper.ERROR_MESSAGE_7X, exc.getMessage()); + @Override + protected boolean supportsIgnoreMalformed() { + return false; } - public void testSparseVectorWith8xIndex() throws Exception { - IndexVersion version = IndexVersionUtils.randomVersionBetween(random(), IndexVersion.V_8_0_0, IndexVersion.current()); - Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); + @Override + protected void registerParameters(ParameterChecker checker) throws IOException {} - IndexService indexService = createIndex("index", settings); - MapperService mapperService = 
indexService.mapperService(); + @Override + protected boolean supportsMeta() { + return false; + } - BytesReference mapping = BytesReference.bytes( - XContentFactory.jsonBuilder() - .startObject() - .startObject("_doc") - .startObject("properties") - .startObject("my-vector") - .field("type", "sparse_vector") - .endObject() - .endObject() - .endObject() - .endObject() - ); + private static int getFrequency(TokenStream tk) throws IOException { + TermFrequencyAttribute freqAttribute = tk.addAttribute(TermFrequencyAttribute.class); + tk.reset(); + assertTrue(tk.incrementToken()); + int freq = freqAttribute.getTermFrequency(); + assertFalse(tk.incrementToken()); + return freq; + } - MapperParsingException e = expectThrows( - MapperParsingException.class, - () -> mapperService.parseMapping(MapperService.SINGLE_MAPPING_NAME, new CompressedXContent(mapping)) - ); - assertThat(e.getMessage(), containsString(SparseVectorFieldMapper.ERROR_MESSAGE)); + public void testDefaults() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + assertEquals(Strings.toString(fieldMapping(this::minimalMapping)), mapper.mappingSource().toString()); + + ParsedDocument doc1 = mapper.parse(source(this::writeField)); + + List fields = doc1.rootDoc().getFields("field"); + assertEquals(2, fields.size()); + assertThat(fields.get(0), Matchers.instanceOf(FeatureField.class)); + FeatureField featureField1 = null; + FeatureField featureField2 = null; + for (IndexableField field : fields) { + if (field.stringValue().equals("ten")) { + featureField1 = (FeatureField) field; + } else if (field.stringValue().equals("twenty")) { + featureField2 = (FeatureField) field; + } else { + throw new UnsupportedOperationException(); + } + } + + int freq1 = getFrequency(featureField1.tokenStream(null, null)); + int freq2 = getFrequency(featureField2.tokenStream(null, null)); + assertTrue(freq1 < freq2); } - public void testSparseVectorWith7xIndex() throws Exception { - IndexVersion version = IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersion.V_8_0_0); - Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); + public void testDotInFieldName() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + DocumentParsingException ex = expectThrows( + DocumentParsingException.class, + () -> mapper.parse(source(b -> b.field("field", Map.of("politi.cs", 10, "sports", 20)))) + ); + assertThat(ex.getCause().getMessage(), containsString("do not support dots in feature names")); + assertThat(ex.getCause().getMessage(), containsString("politi.cs")); + } - IndexService indexService = createIndex("index", settings); - MapperService mapperService = indexService.mapperService(); + public void testRejectMultiValuedFields() throws MapperParsingException, IOException { + DocumentMapper mapper = createDocumentMapper(mapping(b -> { + b.startObject("field").field("type", "sparse_vector").endObject(); + b.startObject("foo").startObject("properties"); + { + b.startObject("field").field("type", "sparse_vector").endObject(); + } + b.endObject().endObject(); + })); - BytesReference mapping = BytesReference.bytes( - XContentFactory.jsonBuilder() - .startObject() - .startObject("_doc") - .startObject("properties") - .startObject("my-vector") - .field("type", "sparse_vector") - .endObject() - .endObject() - .endObject() - .endObject() + DocumentParsingException e = expectThrows( + DocumentParsingException.class, + 
() -> mapper.parse(source(b -> b.startObject("field").field("foo", Arrays.asList(10, 20)).endObject())) + ); + assertEquals( + "[sparse_vector] fields take hashes that map a feature to a strictly positive float, but got unexpected token " + "START_ARRAY", + e.getCause().getMessage() ); - DocumentMapper mapper = mapperService.merge( - MapperService.SINGLE_MAPPING_NAME, - new CompressedXContent(mapping), - MapperService.MergeReason.MAPPING_UPDATE + e = expectThrows(DocumentParsingException.class, () -> mapper.parse(source(b -> { + b.startArray("foo"); + { + b.startObject().startObject("field").field("bar", 10).endObject().endObject(); + b.startObject().startObject("field").field("bar", 20).endObject().endObject(); + } + b.endArray(); + }))); + assertEquals( + "[sparse_vector] fields do not support indexing multiple values for the same feature [foo.field.bar] in " + "the same document", + e.getCause().getMessage() ); + } + + public void testCannotBeUsedInMultiFields() { + Exception e = expectThrows(MapperParsingException.class, () -> createMapperService(fieldMapping(b -> { + b.field("type", "keyword"); + b.startObject("fields"); + b.startObject("feature"); + b.field("type", "sparse_vector"); + b.endObject(); + b.endObject(); + }))); + assertThat(e.getMessage(), containsString("Field [feature] of type [sparse_vector] can't be used in multifields")); + } + + @Override + protected Object generateRandomInputValue(MappedFieldType ft) { + assumeFalse("Test implemented in a follow up", true); + return null; + } + + @Override + protected boolean allowsNullValues() { + return false; // TODO should this allow null values? + } + + @Override + protected SyntheticSourceSupport syntheticSourceSupport(boolean syntheticSource) { + throw new AssumptionViolatedException("not supported"); + } + + @Override + protected IngestScriptSupport ingestScriptSupport() { + throw new AssumptionViolatedException("not supported"); + } + + @Override + protected String[] getParseMinimalWarnings(IndexVersion indexVersion) { + String[] additionalWarnings = null; + if (indexVersion.before(PREVIOUS_SPARSE_VECTOR_INDEX_VERSION)) { + additionalWarnings = new String[] { SparseVectorFieldMapper.ERROR_MESSAGE_7X }; + } + return Strings.concatStringArrays(super.getParseMinimalWarnings(indexVersion), additionalWarnings); + } + + @Override + protected IndexVersion boostNotAllowedIndexVersion() { + return NEW_SPARSE_VECTOR_INDEX_VERSION; + } + + public void testSparseVectorWith7xIndex() throws Exception { + IndexVersion version = IndexVersionUtils.randomPreviousCompatibleVersion(random(), PREVIOUS_SPARSE_VECTOR_INDEX_VERSION); + + XContentBuilder builder = XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("my-vector") + .field("type", "sparse_vector") + .endObject() + .endObject() + .endObject() + .endObject(); + + DocumentMapper mapper = createDocumentMapper(version, builder); assertWarnings(SparseVectorFieldMapper.ERROR_MESSAGE_7X); // Check that new vectors cannot be indexed. 
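The rewritten testDefaults above leans on Lucene storing each feature's weight, lossily, in the term frequency of a FeatureField term, which is why the test only asserts freq1 < freq2. A minimal standalone sketch of that round trip, using stock Lucene APIs; the class and field names are illustrative, not part of this change:

```java
import java.io.IOException;
import java.util.Map;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.TermFrequencyAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FeatureField;

public final class SparseVectorEncodingSketch {

    // One FeatureField per feature; each field instance carries exactly one term ("ten", "twenty", ...).
    static Document toLuceneDoc(String fieldName, Map<String, Float> features) {
        Document doc = new Document();
        for (Map.Entry<String, Float> feature : features.entrySet()) {
            doc.add(new FeatureField(fieldName, feature.getKey(), feature.getValue()));
        }
        return doc;
    }

    // Mirror of the test's getFrequency(): read the encoded weight back out of the
    // single-token stream. The encoding truncates low bits, but larger weights always
    // encode to larger frequencies, which is the property testDefaults checks.
    static int encodedFrequency(FeatureField field) throws IOException {
        try (TokenStream ts = field.tokenStream(null, null)) {
            TermFrequencyAttribute freq = ts.addAttribute(TermFrequencyAttribute.class);
            ts.reset();
            ts.incrementToken();
            int value = freq.getTermFrequency();
            ts.end();
            return value;
        }
    }
}
```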
@@ -115,6 +240,18 @@ public void testSparseVectorWith7xIndex() throws Exception { DocumentParsingException.class, () -> mapper.parse(new SourceToParse("id", source, XContentType.JSON)) ); - assertThat(indexException.getCause().getMessage(), containsString(SparseVectorFieldMapper.ERROR_MESSAGE)); + assertThat(indexException.getCause().getMessage(), containsString(SparseVectorFieldMapper.ERROR_MESSAGE_7X)); + } + + public void testSparseVectorUnsupportedIndex() throws Exception { + IndexVersion version = IndexVersionUtils.randomVersionBetween( + random(), + PREVIOUS_SPARSE_VECTOR_INDEX_VERSION, + IndexVersion.V_8_500_000 + ); + Exception e = expectThrows(MapperParsingException.class, () -> createMapperService(version, fieldMapping(b -> { + b.field("type", "sparse_vector"); + }))); + assertThat(e.getMessage(), containsString(SparseVectorFieldMapper.ERROR_MESSAGE_8X)); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldTypeTests.java index 574fb63cd3fb0..1575d71110c42 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldTypeTests.java @@ -26,22 +26,4 @@ public void testIsNotAggregatable() { MappedFieldType fieldType = new SparseVectorFieldMapper.SparseVectorFieldType("field", Collections.emptyMap()); assertFalse(fieldType.isAggregatable()); } - - public void testDocValueFormatIsNotSupported() { - MappedFieldType fieldType = new SparseVectorFieldMapper.SparseVectorFieldType("field", Collections.emptyMap()); - UnsupportedOperationException exc = expectThrows(UnsupportedOperationException.class, () -> fieldType.docValueFormat(null, null)); - assertEquals(SparseVectorFieldMapper.ERROR_MESSAGE_7X, exc.getMessage()); - } - - public void testExistsQueryIsNotSupported() { - MappedFieldType fieldType = new SparseVectorFieldMapper.SparseVectorFieldType("field", Collections.emptyMap()); - UnsupportedOperationException exc = expectThrows(UnsupportedOperationException.class, () -> fieldType.existsQuery(null)); - assertEquals(SparseVectorFieldMapper.ERROR_MESSAGE_7X, exc.getMessage()); - } - - public void testTermQueryIsNotSupported() { - MappedFieldType fieldType = new SparseVectorFieldMapper.SparseVectorFieldType("field", Collections.emptyMap()); - UnsupportedOperationException exc = expectThrows(UnsupportedOperationException.class, () -> fieldType.termQuery(null, null)); - assertEquals(SparseVectorFieldMapper.ERROR_MESSAGE_7X, exc.getMessage()); - } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index ccd4d9b77010b..a3061df8839fb 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -1748,6 +1748,9 @@ public void testRefreshMetric() throws IOException { } indexDoc(shard, "_doc", "test"); shard.writeIndexingBuffer(); + // This did not actually run a refresh, it called IndexWriter#flushNextBuffer() + assertThat(shard.refreshStats().getTotal(), equalTo(refreshCount + 1)); + shard.refresh("force"); assertThat(shard.refreshStats().getTotal(), equalTo(refreshCount + 2)); closeShards(shard); } @@ -1772,9 +1775,10 @@ public void testExternalRefreshMetric() throws IOException { 
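The adjusted refresh counters above follow from writeIndexingBuffer() now delegating to IndexWriter#flushNextBuffer() rather than running a refresh. A small plain-Lucene sketch of the distinction, under the assumption that freeing the indexing buffer and opening a new point-in-time reader are independent steps; none of this is the PR's code:

```java
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.ByteBuffersDirectory;

public final class FlushVsRefreshSketch {
    public static void main(String[] args) throws Exception {
        try (
            var dir = new ByteBuffersDirectory();
            var writer = new IndexWriter(dir, new IndexWriterConfig());
            var reader = DirectoryReader.open(writer)
        ) {
            var doc = new Document();
            doc.add(new StringField("id", "1", Field.Store.NO));
            writer.addDocument(doc);

            // Frees indexing-buffer RAM by writing a segment, but opens no new point-in-time view:
            writer.flushNextBuffer();
            System.out.println(reader.numDocs()); // still 0

            // Only reopening the reader (what a refresh does) makes the doc searchable:
            try (var refreshed = DirectoryReader.openIfChanged(reader)) {
                System.out.println(refreshed == null ? 0 : refreshed.numDocs()); // 1
            }
        }
    }
}
```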
assertThat(shard.refreshStats().getExternalTotal(), equalTo(shard.refreshStats().getTotal() - 1 - extraInternalRefreshes)); } indexDoc(shard, "_doc", "test"); + // This runs IndexWriter#flushNextBuffer internally shard.writeIndexingBuffer(); assertThat(shard.refreshStats().getExternalTotal(), equalTo(externalRefreshCount)); - assertThat(shard.refreshStats().getExternalTotal(), equalTo(shard.refreshStats().getTotal() - 2 - extraInternalRefreshes)); + assertThat(shard.refreshStats().getExternalTotal(), equalTo(shard.refreshStats().getTotal() - 1 - extraInternalRefreshes)); closeShards(shard); } diff --git a/server/src/test/java/org/elasticsearch/index/similarity/ScriptedSimilarityTests.java b/server/src/test/java/org/elasticsearch/index/similarity/ScriptedSimilarityTests.java index 9c42aabddcc3a..3bb6265e2c94d 100644 --- a/server/src/test/java/org/elasticsearch/index/similarity/ScriptedSimilarityTests.java +++ b/server/src/test/java/org/elasticsearch/index/similarity/ScriptedSimilarityTests.java @@ -146,6 +146,7 @@ public double execute( assertEquals(1, topDocs.totalHits.value); assertTrue(called.get()); assertEquals(42, topDocs.scoreDocs[0].score, 0); + r.close(); w.close(); dir.close(); } @@ -238,6 +239,7 @@ public double execute( assertTrue(initCalled.get()); assertTrue(called.get()); assertEquals(42, topDocs.scoreDocs[0].score, 0); + r.close(); w.close(); dir.close(); } diff --git a/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java b/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java index 0ffda645f7a48..0c21e80290bd3 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java @@ -7,20 +7,16 @@ */ package org.elasticsearch.indices; -import org.apache.lucene.search.ReferenceManager; -import org.apache.lucene.util.SetOnce; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.index.codec.CodecService; -import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.InternalEngine; -import org.elasticsearch.index.refresh.RefreshStats; +import org.elasticsearch.index.engine.InternalEngineFactory; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardTestCase; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.threadpool.Scheduler.Cancellable; import org.elasticsearch.threadpool.ThreadPool; @@ -42,6 +38,7 @@ import static java.util.Collections.emptySet; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.lessThan; public class IndexingMemoryControllerTests extends IndexShardTestCase { @@ -96,7 +93,7 @@ protected long getShardWritingBytes(IndexShard shard) { protected void checkIdle(IndexShard shard, long inactiveTimeNS) {} @Override - public void writeIndexingBufferAsync(IndexShard shard) { + public void enqueueWriteIndexingBuffer(IndexShard shard) { long bytes = indexBufferRAMBytesUsed.put(shard, 0L); writingBytes.put(shard, writingBytes.get(shard) + bytes); indexBufferRAMBytesUsed.put(shard, 0L); 
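For orientation, the renamed enqueueWriteIndexingBuffer override in the mock above is pure bookkeeping: it zeroes the shard's simulated buffer and credits those bytes to a per-shard writing tally that assertBuffer and assertWriting inspect. A condensed sketch of that accounting, with String ids standing in for IndexShard and every name here invented for illustration:

```java
import java.util.HashMap;
import java.util.Map;

final class WriteBudget {
    private final Map<String, Long> buffered = new HashMap<>(); // simulated indexing-buffer bytes
    private final Map<String, Long> writing = new HashMap<>();  // bytes handed off for writing

    void simulateIndexing(String shardId, long bytes) {
        buffered.merge(shardId, bytes, Long::sum);
    }

    // Mirrors the override above: zero the buffer and credit the bytes as "being written".
    void enqueueWriteIndexingBuffer(String shardId) {
        long bytes = buffered.getOrDefault(shardId, 0L);
        buffered.put(shardId, 0L);
        writing.merge(shardId, bytes, Long::sum);
    }

    long bufferedBytes(String shardId) { return buffered.getOrDefault(shardId, 0L); } // assertBuffer
    long writingBytes(String shardId) { return writing.getOrDefault(shardId, 0L); }   // assertWriting
}
```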
@@ -162,12 +159,18 @@ protected Cancellable scheduleTask(ThreadPool threadPool) { public void testShardAdditionAndRemoval() throws IOException { MockController controller = new MockController(Settings.builder().put("indices.memory.index_buffer_size", "4mb").build()); - IndexShard shard0 = newStartedShard(); + IndexShard shard0 = newStartedShard( + p -> newShard(p, new ShardId("index0", "uuid0", 0), Settings.EMPTY, new InternalEngineFactory()), + randomBoolean() + ); controller.simulateIndexing(shard0); controller.assertBuffer(shard0, 1); // add another shard - IndexShard shard1 = newStartedShard(); + IndexShard shard1 = newStartedShard( + p -> newShard(p, new ShardId("index1", "uuid1", 0), Settings.EMPTY, new InternalEngineFactory()), + randomBoolean() + ); controller.simulateIndexing(shard1); controller.assertBuffer(shard0, 1); controller.assertBuffer(shard1, 1); @@ -192,9 +195,15 @@ public void testActiveInactive() throws IOException { MockController controller = new MockController(Settings.builder().put("indices.memory.index_buffer_size", "5mb").build()); - IndexShard shard0 = newStartedShard(); + IndexShard shard0 = newStartedShard( + p -> newShard(p, new ShardId("index0", "uuid0", 0), Settings.EMPTY, new InternalEngineFactory()), + randomBoolean() + ); controller.simulateIndexing(shard0); - IndexShard shard1 = newStartedShard(); + IndexShard shard1 = newStartedShard( + p -> newShard(p, new ShardId("index1", "uuid1", 0), Settings.EMPTY, new InternalEngineFactory()), + randomBoolean() + ); controller.simulateIndexing(shard1); controller.assertBuffer(shard0, 1); @@ -206,7 +215,7 @@ public void testActiveInactive() throws IOException { controller.assertBuffer(shard0, 2); controller.assertBuffer(shard1, 2); - // index into one shard only, crosses the 5mb limit, so shard1 is refreshed + // index into one shard only, crosses the 5mb limit, so shard0 is refreshed controller.simulateIndexing(shard0); controller.simulateIndexing(shard0); controller.assertBuffer(shard0, 0); @@ -285,28 +294,37 @@ public void testMaxBufferSizes() { public void testThrottling() throws Exception { MockController controller = new MockController(Settings.builder().put("indices.memory.index_buffer_size", "4mb").build()); - IndexShard shard0 = newStartedShard(); - IndexShard shard1 = newStartedShard(); - controller.simulateIndexing(shard0); + IndexShard shard0 = newStartedShard( + p -> newShard(p, new ShardId("index0", "uuid0", 0), Settings.EMPTY, new InternalEngineFactory()), + randomBoolean() + ); + IndexShard shard1 = newStartedShard( + p -> newShard(p, new ShardId("index1", "uuid1", 0), Settings.EMPTY, new InternalEngineFactory()), + randomBoolean() + ); + + assertThat(shard0.routingEntry().shardId(), lessThan(shard1.routingEntry().shardId())); + controller.simulateIndexing(shard0); controller.simulateIndexing(shard0); - controller.assertBuffer(shard0, 3); + controller.assertBuffer(shard0, 2); + controller.simulateIndexing(shard1); controller.simulateIndexing(shard1); controller.simulateIndexing(shard1); - // We are now using 5 MB, so we should be writing shard0 since it's using the most heap: - controller.assertWriting(shard0, 3); + // We are now using 5 MB, so we should be writing shard0 since shards get flushed by increasing shard id, even though shard1 uses + // more RAM buffer + controller.assertWriting(shard0, 2); controller.assertWriting(shard1, 0); controller.assertBuffer(shard0, 0); - controller.assertBuffer(shard1, 2); + controller.assertBuffer(shard1, 3); controller.simulateIndexing(shard0); 
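The reworked throttling expectations in this test encode the policy stated in the new comment: once the combined buffer crosses the limit, shards are written in ascending shard-id order, not by largest RAM user. A toy selection pass under that assumption; this sketches the described behaviour, not the controller's actual implementation:

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

// Stand-in for IndexShard plus its current indexing-buffer usage.
record ShardBuffer(int shardId, long ramBytes) {}

final class WriteSelection {
    static List<ShardBuffer> shardsToWrite(List<ShardBuffer> shards, long budgetBytes) {
        long total = shards.stream().mapToLong(ShardBuffer::ramBytes).sum();
        List<ShardBuffer> toWrite = new ArrayList<>();
        List<ShardBuffer> byId = shards.stream().sorted(Comparator.comparingInt(ShardBuffer::shardId)).toList();
        for (ShardBuffer shard : byId) {
            if (total <= budgetBytes) {
                break; // back under the limit, stop enqueueing writes
            }
            toWrite.add(shard); // lowest ids first, regardless of who buffers the most
            total -= shard.ramBytes();
        }
        return toWrite;
    }

    public static void main(String[] args) {
        // Matches the test: shard0 buffers 2 MB, shard1 buffers 3 MB, 4 MB budget -> only shard0 written.
        var picked = shardsToWrite(List.of(new ShardBuffer(0, 2), new ShardBuffer(1, 3)), 4);
        System.out.println(picked); // [ShardBuffer[shardId=0, ramBytes=2]]
    }
}
```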
controller.simulateIndexing(shard1); - controller.simulateIndexing(shard1); - // Now we are still writing 3 MB (shard0), and using 5 MB index buffers, so we should now 1) be writing shard1, - // and 2) be throttling shard1: - controller.assertWriting(shard0, 3); + // We crossed the limit again, so now we should be writing the next shard after shard0: shard1. And since bytes are still being + // written and haven't been released yet, we should be throttling the same shard we flushed: shard1. + controller.assertWriting(shard0, 2); controller.assertWriting(shard1, 4); controller.assertBuffer(shard0, 1); controller.assertBuffer(shard1, 0); @@ -323,7 +341,7 @@ public void testThrottling() throws Exception { controller.simulateIndexing(shard0); // Now we are using 5 MB again, so shard0 should also be writing and now also be throttled: - controller.assertWriting(shard0, 8); + controller.assertWriting(shard0, 7); controller.assertWriting(shard1, 4); controller.assertBuffer(shard0, 0); controller.assertBuffer(shard1, 0); @@ -354,7 +372,7 @@ public void testTranslogRecoveryWorksWithIMC() throws IOException { AtomicInteger flushes = new AtomicInteger(); IndexingMemoryController imc = new IndexingMemoryController(settings, threadPool, iterable) { @Override - protected void writeIndexingBufferAsync(IndexShard shard) { + protected void enqueueWriteIndexingBuffer(IndexShard shard) { assertEquals(shard, shardRef.get()); flushes.incrementAndGet(); shard.writeIndexingBuffer(); @@ -372,40 +390,6 @@ protected void writeIndexingBufferAsync(IndexShard shard) { closeShards(shard); } - EngineConfig configWithRefreshListener(EngineConfig config, ReferenceManager.RefreshListener listener) { - final List internalRefreshListener = new ArrayList<>(config.getInternalRefreshListener()); - ; - internalRefreshListener.add(listener); - return new EngineConfig( - config.getShardId(), - config.getThreadPool(), - config.getIndexSettings(), - config.getWarmer(), - config.getStore(), - config.getMergePolicy(), - config.getAnalyzer(), - config.getSimilarity(), - new CodecService(null, BigArrays.NON_RECYCLING_INSTANCE), - config.getEventListener(), - config.getQueryCache(), - config.getQueryCachingPolicy(), - config.getTranslogConfig(), - config.getFlushMergesAfter(), - config.getExternalRefreshListener(), - internalRefreshListener, - config.getIndexSort(), - config.getCircuitBreakerService(), - config.getGlobalCheckpointSupplier(), - config.retentionLeasesSupplier(), - config.getPrimaryTermSupplier(), - config.getSnapshotCommitSupplier(), - config.getLeafSorter(), - config.getRelativeTimeInNanosSupplier(), - config.getIndexCommitListener(), - config.isPromotableToPrimary() - ); - } - ThreadPoolStats.Stats getRefreshThreadPoolStats() { final ThreadPoolStats stats = threadPool.stats(); for (ThreadPoolStats.Stats s : stats) { @@ -416,32 +400,19 @@ ThreadPoolStats.Stats getRefreshThreadPoolStats() { throw new AssertionError("refresh thread pool stats not found [" + stats + "]"); } - public void testSkipRefreshIfShardIsRefreshingAlready() throws Exception { - SetOnce refreshLatch = new SetOnce<>(); - ReferenceManager.RefreshListener refreshListener = new ReferenceManager.RefreshListener() { + public void testSkipIfPendingAlready() throws Exception { + final CountDownLatch latch = new CountDownLatch(1); + IndexShard shard = newStartedShard(randomBoolean(), Settings.EMPTY, config -> new InternalEngine(config) { @Override - public void beforeRefresh() { - if (refreshLatch.get() != null) { - try { - refreshLatch.get().await(); - } catch 
(InterruptedException e) { - throw new AssertionError(e); - } + public void writeIndexingBuffer() throws IOException { + try { + latch.await(); + } catch (InterruptedException e) { + throw new AssertionError(e); } + super.writeIndexingBuffer(); } - - @Override - public void afterRefresh(boolean didRefresh) { - - } - }; - IndexShard shard = newStartedShard( - randomBoolean(), - Settings.EMPTY, - config -> new InternalEngine(configWithRefreshListener(config, refreshListener)) - ); - refreshLatch.set(new CountDownLatch(1)); // block refresh - final RefreshStats refreshStats = shard.refreshStats(); + }); final IndexingMemoryController controller = new IndexingMemoryController( Settings.builder() .put("indices.memory.interval", "200h") // disable it @@ -460,21 +431,24 @@ protected long getShardWritingBytes(IndexShard shard) { return 0L; } }; - int iterations = randomIntBetween(10, 100); ThreadPoolStats.Stats beforeStats = getRefreshThreadPoolStats(); + int iterations = randomIntBetween(1000, 2000); for (int i = 0; i < iterations; i++) { controller.forceCheck(); } assertBusy(() -> { ThreadPoolStats.Stats stats = getRefreshThreadPoolStats(); - assertThat(stats.completed(), equalTo(beforeStats.completed() + iterations - 1)); + assertThat(stats.active(), greaterThanOrEqualTo(1)); }); - refreshLatch.get().countDown(); // allow refresh + latch.countDown(); assertBusy(() -> { ThreadPoolStats.Stats stats = getRefreshThreadPoolStats(); - assertThat(stats.completed(), equalTo(beforeStats.completed() + iterations)); + assertThat(stats.queue(), equalTo(0)); }); - assertThat(shard.refreshStats().getTotal(), equalTo(refreshStats.getTotal() + 1)); + ThreadPoolStats.Stats afterStats = getRefreshThreadPoolStats(); + // The number of completed tasks should be in the order of the size of the refresh thread pool, way below the number of iterations, + // since we would not queue a shard to write its indexing buffer if it's already in the queue. 
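As the new comment above spells out, testSkipIfPendingAlready depends on a shard not being enqueued for a second indexing-buffer write while one is still pending, so the completed-task count stays near the pool size rather than the iteration count. A minimal sketch of such enqueue-once semantics; the class and method names are made up:

```java
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;

// A shard is only submitted to the write/refresh pool if it is not already pending,
// so repeated forceCheck() calls while a write is stuck add no extra tasks.
final class WriteQueue {
    private final Set<String> pending = ConcurrentHashMap.newKeySet();
    private final ExecutorService refreshPool;

    WriteQueue(ExecutorService refreshPool) {
        this.refreshPool = refreshPool;
    }

    void enqueueWriteIndexingBuffer(String shardId, Runnable writeBuffer) {
        if (pending.add(shardId) == false) {
            return; // already queued or running: skip, keeping completed-task counts low
        }
        refreshPool.execute(() -> {
            try {
                writeBuffer.run();
            } finally {
                pending.remove(shardId); // allow the next write once this one finishes
            }
        });
    }
}
```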
+ assertThat(afterStats.completed() - beforeStats.completed(), lessThan(100L)); closeShards(shard); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestStateTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestStateTests.java index a70e7a241fe7a..88393c63ff56f 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestStateTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestStateTests.java @@ -193,10 +193,10 @@ public void testSerialization() throws IOException { backwardsCompatible.add(i); } - TDigestState serialized = writeToAndReadFrom(state, TransportVersion.V_8_500_014); + TDigestState serialized = writeToAndReadFrom(state, TransportVersion.V_8_500_020); assertEquals(serialized, state); - TDigestState serializedBackwardsCompatible = writeToAndReadFrom(state, TransportVersion.V_8_500_010); + TDigestState serializedBackwardsCompatible = writeToAndReadFrom(state, TransportVersion.V_8_8_1); assertNotEquals(serializedBackwardsCompatible, state); assertEquals(serializedBackwardsCompatible, backwardsCompatible); } diff --git a/server/src/test/java/org/elasticsearch/search/internal/ShardSearchRequestTests.java b/server/src/test/java/org/elasticsearch/search/internal/ShardSearchRequestTests.java index 65b7eb1c844c4..374b0a2e516e4 100644 --- a/server/src/test/java/org/elasticsearch/search/internal/ShardSearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/search/internal/ShardSearchRequestTests.java @@ -236,7 +236,7 @@ public void testChannelVersion() throws Exception { version = TransportVersionUtils.randomVersionBetween(random(), TransportVersion.V_8_8_0, TransportVersion.current()); } if (request.source() != null && request.source().subSearches().size() >= 2) { - version = TransportVersionUtils.randomVersionBetween(random(), TransportVersion.V_8_500_013, TransportVersion.current()); + version = TransportVersionUtils.randomVersionBetween(random(), TransportVersion.V_8_500_020, TransportVersion.current()); } request = copyWriteable(request, namedWriteableRegistry, ShardSearchRequest::new, version); channelVersion = TransportVersion.min(channelVersion, version); diff --git a/server/src/test/java/org/elasticsearch/threadpool/ScheduleWithFixedDelayTests.java b/server/src/test/java/org/elasticsearch/threadpool/ScheduleWithFixedDelayTests.java index 75a2c4e69c6a1..ee7b929072bd1 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/ScheduleWithFixedDelayTests.java +++ b/server/src/test/java/org/elasticsearch/threadpool/ScheduleWithFixedDelayTests.java @@ -22,8 +22,10 @@ import org.junit.Before; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -274,24 +276,48 @@ public ScheduledCancellable schedule(Runnable command, TimeValue delay, Executor } public void testRunnableDoesNotRunAfterCancellation() throws Exception { - final int iterations = scaledRandomIntBetween(2, 12); - final AtomicInteger counter = new AtomicInteger(); - final CountDownLatch doneLatch = new CountDownLatch(iterations); - final Runnable countingRunnable = () -> { + int iterations = scaledRandomIntBetween(2, 12); + + // we don't have the cancellable until we 
schedule the task, which needs the barrier object to reference in the closure + // so break the circular dependency here + AtomicReference checkCancel = new AtomicReference<>(); + + AtomicInteger counter = new AtomicInteger(); + CyclicBarrier barrier = new CyclicBarrier(2, () -> checkCancel.get().run()); + Runnable countingRunnable = () -> { counter.incrementAndGet(); - doneLatch.countDown(); + try { + barrier.await(); + } catch (Exception e) { + throw new AssertionError(e); + } }; - final TimeValue interval = TimeValue.timeValueMillis(50L); - final Cancellable cancellable = threadPool.scheduleWithFixedDelay(countingRunnable, interval, threadPool.generic()); - doneLatch.await(); - cancellable.cancel(); + TimeValue interval = TimeValue.timeValueMillis(50L); + Cancellable cancellable = threadPool.scheduleWithFixedDelay(countingRunnable, interval, threadPool.generic()); + checkCancel.set(new Runnable() { + private int remaining = iterations; + + @Override + public void run() { + if (--remaining == 0) { + cancellable.cancel(); + } + } + }); + + for (int i = 0; i < iterations; i++) { + barrier.await(); + } + expectThrows(TimeoutException.class, () -> barrier.await(2 * interval.millis(), TimeUnit.MILLISECONDS)); - final int counterValue = counter.get(); - assertThat(counterValue, equalTo(iterations)); + assertThat(counter.get(), equalTo(iterations)); if (rarely()) { - assertBusy(() -> assertThat(counter.get(), equalTo(iterations)), 5 * interval.millis(), TimeUnit.MILLISECONDS); + assertBusy(() -> { + expectThrows(TimeoutException.class, () -> barrier.await(interval.millis(), TimeUnit.MILLISECONDS)); + assertThat(counter.get(), equalTo(iterations)); + }, 5 * interval.millis(), TimeUnit.MILLISECONDS); } } } diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index ffcbf306417c9..bc5709c77b74d 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java @@ -7,6 +7,7 @@ */ package org.elasticsearch.transport; +import org.apache.logging.log4j.Level; import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; @@ -26,6 +27,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -44,6 +46,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiFunction; +import static org.elasticsearch.test.MockLogAppender.assertThatLogger; import static org.elasticsearch.test.NodeRoles.masterOnlyNode; import static org.elasticsearch.test.NodeRoles.nonMasterNode; import static org.elasticsearch.test.NodeRoles.onlyRoles; @@ -863,7 +866,7 @@ public void testRemoteNodeRoles() throws IOException, InterruptedException { } } - private ActionListener connectionListener(final CountDownLatch latch) { + private ActionListener connectionListener(final CountDownLatch latch) { return ActionTestUtils.assertNoFailureListener(x -> latch.countDown()); } @@ -1423,6 +1426,53 @@ public void testUseDifferentTransportProfileForCredentialsProtectedRemoteCluster } } + public void testLogsConnectionResult() throws IOException 
{ + + try ( + var remote = startTransport("remote", List.of(), VersionInformation.CURRENT, TransportVersion.current(), Settings.EMPTY); + var local = startTransport("local", List.of(), VersionInformation.CURRENT, TransportVersion.current(), Settings.EMPTY); + var remoteClusterService = new RemoteClusterService(Settings.EMPTY, local) + ) { + var clusterSettings = ClusterSettings.createBuiltInClusterSettings(); + remoteClusterService.listenForUpdates(clusterSettings); + + assertThatLogger( + () -> clusterSettings.applySettings( + Settings.builder().putList("cluster.remote.remote_1.seeds", remote.getLocalDiscoNode().getAddress().toString()).build() + ), + RemoteClusterService.class, + new MockLogAppender.SeenEventExpectation( + "Should log when connecting to remote", + RemoteClusterService.class.getCanonicalName(), + Level.INFO, + "remote cluster connection [remote_1] updated: CONNECTED" + ) + ); + + assertThatLogger( + () -> clusterSettings.applySettings(Settings.EMPTY), + RemoteClusterService.class, + new MockLogAppender.SeenEventExpectation( + "Should log when disconnecting from remote", + RemoteClusterService.class.getCanonicalName(), + Level.INFO, + "remote cluster connection [remote_1] updated: DISCONNECTED" + ) + ); + + assertThatLogger( + () -> clusterSettings.applySettings(Settings.builder().put(randomIdentifier(), randomIdentifier()).build()), + RemoteClusterService.class, + new MockLogAppender.UnseenEventExpectation( + "Should not log when changing unrelated setting", + RemoteClusterService.class.getCanonicalName(), + Level.INFO, + "*" + ) + ); + } + } + private static Settings createSettings(String clusterAlias, List seeds) { Settings.Builder builder = Settings.builder(); builder.put( diff --git a/settings.gradle b/settings.gradle index fbaf783d76aa7..09aaef7ede189 100644 --- a/settings.gradle +++ b/settings.gradle @@ -14,7 +14,7 @@ pluginManagement { } plugins { - id "com.gradle.enterprise" version "3.13.1" + id "com.gradle.enterprise" version "3.14.1" id 'elasticsearch.java-toolchain' } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java index c45e08c857f48..bc58a792cefc6 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java @@ -441,6 +441,10 @@ protected String[] getParseMinimalWarnings() { return Strings.EMPTY_ARRAY; } + protected String[] getParseMinimalWarnings(IndexVersion indexVersion) { + return getParseMinimalWarnings(); + } + protected String[] getParseMaximalWarnings() { // Most mappers don't emit any warnings return Strings.EMPTY_ARRAY; @@ -494,24 +498,26 @@ public final void testMeta() throws IOException { ); } - public final void testDeprecatedBoost() throws IOException { + public final void testDeprecatedBoostWarning() throws IOException { try { createMapperService(DEPRECATED_BOOST_INDEX_VERSION, fieldMapping(b -> { minimalMapping(b, DEPRECATED_BOOST_INDEX_VERSION); b.field("boost", 2.0); })); String[] warnings = Strings.concatStringArrays( - getParseMinimalWarnings(), + getParseMinimalWarnings(DEPRECATED_BOOST_INDEX_VERSION), new String[] { "Parameter [boost] on field [field] is deprecated and has no effect" } ); assertWarnings(warnings); } catch (MapperParsingException e) { assertThat(e.getMessage(), anyOf(containsString("Unknown parameter [boost]"), containsString("[boost : 2.0]"))); } + } + public void 
testBoostNotAllowed() throws IOException { MapperParsingException e = expectThrows( MapperParsingException.class, - () -> createMapperService(IndexVersion.V_8_0_0, fieldMapping(b -> { + () -> createMapperService(boostNotAllowedIndexVersion(), fieldMapping(b -> { minimalMapping(b); b.field("boost", 2.0); })) @@ -521,6 +527,10 @@ public final void testDeprecatedBoost() throws IOException { assertParseMinimalWarnings(); } + protected IndexVersion boostNotAllowedIndexVersion() { + return IndexVersion.V_8_0_0; + } + /** * Use a {@linkplain ValueFetcher} to extract values from doc values. */ diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 4ed2b6b1eb808..5a6d8bb878af8 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -223,12 +223,32 @@ protected IndexShard newShard( Settings settings, EngineFactory engineFactory, final IndexingOperationListener... listeners + ) throws IOException { + return newShard(primary, new ShardId("index", "_na_", 0), settings, engineFactory, listeners); + } + + /** + * Creates a new initializing shard. The shard will have its own unique data path. + * + * @param primary indicates whether to a primary shard (ready to recover from an empty store) or a replica (ready to recover from + * another shard) + * @param shardId the shard ID for this shard + * @param settings the settings to use for this shard + * @param engineFactory the engine factory to use for this shard + * @param listeners the indexing operation listeners to add + */ + protected IndexShard newShard( + boolean primary, + ShardId shardId, + Settings settings, + EngineFactory engineFactory, + final IndexingOperationListener... listeners ) throws IOException { final RecoverySource recoverySource = primary ? 
RecoverySource.EmptyStoreRecoverySource.INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE; final ShardRouting shardRouting = TestShardRouting.newShardRouting( - new ShardId("index", "_na_", 0), + shardId, randomAlphaOfLength(10), primary, ShardRoutingState.INITIALIZING, @@ -482,7 +502,7 @@ protected IndexShard newShard( xContentRegistry(), createTempDir(), indexSettings.getSettings(), - "index" + routing.getIndexName() ); mapperService.merge(indexMetadata, MapperService.MergeReason.MAPPING_RECOVERY); SimilarityService similarityService = new SimilarityService(indexSettings, null, Collections.emptyMap()); @@ -712,7 +732,10 @@ public static void updateRoutingEntry(IndexShard shard, ShardRouting shardRoutin protected void recoveryEmptyReplica(IndexShard replica, boolean startReplica) throws IOException { IndexShard primary = null; try { - primary = newStartedShard(true); + primary = newStartedShard( + p -> newShard(p, replica.routingEntry().shardId(), replica.indexSettings.getSettings(), new InternalEngineFactory()), + true + ); recoverReplica(replica, primary, startReplica); } finally { closeShards(primary); diff --git a/x-pack/docs/en/watcher/example-watches/example-watch-clusterstatus.asciidoc b/x-pack/docs/en/watcher/example-watches/example-watch-clusterstatus.asciidoc index edfd7cdb486dd..edbc4c610b272 100644 --- a/x-pack/docs/en/watcher/example-watches/example-watch-clusterstatus.asciidoc +++ b/x-pack/docs/en/watcher/example-watches/example-watch-clusterstatus.asciidoc @@ -38,8 +38,8 @@ PUT _watcher/watch/cluster_health_watch Since this watch runs so frequently, don't forget to <> when you're done experimenting. -To get the status of your cluster, you can call the Elasticsearch -{ref}//cluster-health.html[cluster health] API: +To get the status of your cluster, you can call the <>: [source,console] -------------------------------------------------- diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregationBuilder.java index 3cebd756408fb..b5711f33dd6ce 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregationBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregationBuilder.java @@ -82,10 +82,8 @@ protected AggregationBuilder shallowCopy(AggregatorFactories.Builder factoriesBu public BoxplotAggregationBuilder(StreamInput in) throws IOException { super(in); compression = in.readDouble(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_018)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { executionHint = in.readOptionalWriteable(TDigestExecutionHint::readFrom); - } else if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_014)) { - executionHint = TDigestExecutionHint.readFrom(in); } else { executionHint = TDigestExecutionHint.HIGH_ACCURACY; } @@ -99,10 +97,8 @@ public Set metricNames() { @Override protected void innerWriteTo(StreamOutput out) throws IOException { out.writeDouble(compression); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_018)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { out.writeOptionalWriteable(executionHint); - } else if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_014)) { - (executionHint == null ? 
TDigestExecutionHint.DEFAULT : executionHint).writeTo(out); } } diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/InternalResetTrackingRate.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/InternalResetTrackingRate.java index b47ef33b4e68f..96c50dc9fc790 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/InternalResetTrackingRate.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/InternalResetTrackingRate.java @@ -63,7 +63,7 @@ public InternalResetTrackingRate(StreamInput in) throws IOException { this.startTime = in.readLong(); this.endTime = in.readLong(); this.resetCompensation = in.readDouble(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_015)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { this.rateUnit = Rounding.DateTimeUnit.resolve(in.readByte()); } else { this.rateUnit = Rounding.DateTimeUnit.SECOND_OF_MINUTE; @@ -82,7 +82,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeLong(startTime); out.writeLong(endTime); out.writeDouble(resetCompensation); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_015) && rateUnit != null) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020) && rateUnit != null) { out.writeByte(rateUnit.getId()); } else { out.writeByte(Rounding.DateTimeUnit.SECOND_OF_MINUTE.getId()); diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java index 80fb948754403..494a2814eaf40 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java @@ -971,7 +971,7 @@ public static class ReactiveReason implements AutoscalingDeciderResult.Reason { static final int MAX_AMOUNT_OF_SHARDS = 512; private static final TransportVersion SHARD_IDS_OUTPUT_VERSION = TransportVersion.V_8_4_0; - private static final TransportVersion UNASSIGNED_NODE_DECISIONS_OUTPUT_VERSION = TransportVersion.V_8_500_010; + private static final TransportVersion UNASSIGNED_NODE_DECISIONS_OUTPUT_VERSION = TransportVersion.V_8_500_020; private final String reason; private final long unassigned; diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java index b0bd3d955322e..f80904625aede 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java @@ -55,7 +55,7 @@ import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.LongAdder; -import java.util.function.LongConsumer; +import java.util.function.IntConsumer; import java.util.function.Predicate; import java.util.stream.Collectors; @@ -263,7 +263,7 @@ public void validate(ByteSizeValue value, Map, Object> settings, bool private final SharedBytes sharedBytes; private final long cacheSize; - private final long regionSize; + private final 
int regionSize; private final ByteSizeValue rangeSize; private final ByteSizeValue recoveryRangeSize; @@ -308,7 +308,7 @@ public SharedBlobCacheService( throw new IllegalStateException("unable to probe size of filesystem [" + environment.nodeDataPaths()[0] + "]"); } this.cacheSize = calculateCacheSize(settings, totalFsSize); - final long regionSize = SHARED_CACHE_REGION_SIZE_SETTING.get(settings).getBytes(); + final int regionSize = Math.toIntExact(SHARED_CACHE_REGION_SIZE_SETTING.get(settings).getBytes()); this.numRegions = Math.toIntExact(cacheSize / regionSize); keyMapping = new ConcurrentHashMap<>(); if (Assertions.ENABLED) { @@ -360,16 +360,16 @@ private int getRegion(long position) { return (int) (position / regionSize); } - private long getRegionRelativePosition(long position) { - return position % regionSize; + private int getRegionRelativePosition(long position) { + return (int) (position % regionSize); } private long getRegionStart(int region) { - return region * regionSize; + return (long) region * regionSize; } private long getRegionEnd(int region) { - return (region + 1) * regionSize; + return (long) (region + 1) * regionSize; } private int getEndingRegion(long position) { @@ -393,12 +393,12 @@ private ByteRange mapSubRangeToRegion(ByteRange range, int region) { ); } - private long getRegionSize(long fileLength, int region) { + private int getRegionSize(long fileLength, int region) { assert fileLength > 0; final int maxRegion = getEndingRegion(fileLength); assert region >= 0 && region <= maxRegion : region + " - " + maxRegion; - final long effectiveRegionSize; - if (region == maxRegion && (region + 1) * regionSize != fileLength) { + final int effectiveRegionSize; + if (region == maxRegion && (long) (region + 1) * regionSize != fileLength) { assert getRegionRelativePosition(fileLength) != 0L; effectiveRegionSize = getRegionRelativePosition(fileLength); } else { @@ -415,7 +415,7 @@ Entry get(KeyType cacheKey, long fileLength, int region) { // find an entry var entry = keyMapping.get(regionKey); if (entry == null) { - final long effectiveRegionSize = getRegionSize(fileLength, region); + final int effectiveRegionSize = getRegionSize(fileLength, region); entry = keyMapping.computeIfAbsent(regionKey, key -> new Entry<>(new CacheFileRegion(key, effectiveRegionSize), now)); } // io is volatile, double locking is fine, as long as we assign it last. 
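A note on the SharedBlobCacheService hunks above: once regionSize narrows from long to int, products of two int region quantities become overflow-prone, which is why the diff widens one operand first ((long) region * regionSize). The standalone sketch below reproduces the failure mode it guards against; the class name and the 64 MiB / region-40 values are illustrative only, not taken from the cache defaults.

    public class RegionOffsetDemo {
        // Illustrative region size: 64 MiB.
        private static final int REGION_SIZE = 64 * 1024 * 1024;

        // Broken: the multiply happens in 32-bit arithmetic and only the
        // truncated result is widened to long.
        static long regionStartBroken(int region) {
            return region * REGION_SIZE;
        }

        // Safe: widening one operand first forces a 64-bit multiply, matching
        // the (long) region * regionSize pattern in the diff.
        static long regionStartSafe(int region) {
            return (long) region * REGION_SIZE;
        }

        public static void main(String[] args) {
            int region = 40; // 40 * 64 MiB = 2.5 GiB, past Integer.MAX_VALUE
            System.out.println(regionStartBroken(region)); // prints -1610612736
            System.out.println(regionStartSafe(region));   // prints 2684354560
        }
    }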
@@ -449,33 +449,37 @@ Entry get(KeyType cacheKey, long fileLength, int region) { * @param cacheKey the key to fetch data for * @param length the length of the blob to fetch * @param writer a writer that handles writing of newly downloaded data to the shared cache + * @param listener listener that is called once all downloading has finished * * @return {@code true} if there were enough free pages to start downloading */ - public boolean maybeFetchFullEntry(KeyType cacheKey, long length, RangeMissingHandler writer) { + public boolean maybeFetchFullEntry(KeyType cacheKey, long length, RangeMissingHandler writer, ActionListener listener) { int finalRegion = getEndingRegion(length); if (freeRegionCount() < finalRegion) { // Not enough room to download a full file without evicting existing data, so abort return false; } long regionLength = regionSize; - for (int region = 0; region <= finalRegion; region++) { - var entry = get(cacheKey, length, region); - if (region == finalRegion) { - regionLength = length - getRegionStart(region); - } - ByteRange rangeToWrite = ByteRange.of(0, regionLength); - if (rangeToWrite.length() == 0) { - return true; + try (RefCountingListener refCountingListener = new RefCountingListener(listener)) { + for (int region = 0; region <= finalRegion; region++) { + var entry = get(cacheKey, length, region); + if (region == finalRegion) { + regionLength = length - getRegionStart(region); + } + ByteRange rangeToWrite = ByteRange.of(0, regionLength); + if (rangeToWrite.isEmpty()) { + return true; + } + // set read range == write range so the listener completes only once all the bytes have been downloaded + entry.chunk.populateAndRead( + rangeToWrite, + rangeToWrite, + (channel, pos, relativePos, len) -> Math.toIntExact(len), + writer, + bulkIOExecutor, + refCountingListener.acquire(ignored -> {}) + ); } - entry.chunk.populateAndRead( - rangeToWrite, - ByteRange.EMPTY, - (channel, pos, relativePos, len) -> 0, - writer, - bulkIOExecutor, - ActionListener.noop() - ); } return true; } @@ -820,19 +824,15 @@ class CacheFileRegion extends EvictableRefCounted { final SparseFileTracker tracker; volatile SharedBytes.IO io = null; - CacheFileRegion(RegionKey regionKey, long regionSize) { + CacheFileRegion(RegionKey regionKey, int regionSize) { this.regionKey = regionKey; - assert regionSize > 0L; + assert regionSize > 0; tracker = new SparseFileTracker("file", regionSize); } public long physicalStartOffset() { var ioRef = io; - return ioRef == null ? -1L : ioRef.pageStart(); - } - - public long physicalEndOffset() { - return physicalStartOffset() + sharedBytes.regionSize; + return ioRef == null ? 
-1L : (long) regionKey.region * regionSize; } // tries to evict this chunk if noone is holding onto its resources anymore @@ -881,11 +881,9 @@ private static void throwAlreadyEvicted() { } boolean tryRead(ByteBuffer buf, long offset) throws IOException { - int startingPos = buf.position(); - var ioRef = io; - ioRef.read(buf, ioRef.pageStart() + getRegionRelativePosition(offset)); + int readBytes = io.read(buf, getRegionRelativePosition(offset)); if (isEvicted()) { - buf.position(startingPos); + buf.position(buf.position() - readBytes); return false; } return true; @@ -909,14 +907,9 @@ void populateAndRead( rangeToRead, ActionListener.runBefore(listener, resource::close).delegateFailureAndWrap((l, success) -> { var ioRef = io; - final long physicalStartOffset = ioRef.pageStart(); assert regionOwners.get(ioRef) == this; - final int read = reader.onRangeAvailable( - ioRef, - physicalStartOffset + rangeToRead.start(), - rangeToRead.start(), - rangeToRead.length() - ); + final int start = Math.toIntExact(rangeToRead.start()); + final int read = reader.onRangeAvailable(ioRef, start, start, Math.toIntExact(rangeToRead.length())); assert read == rangeToRead.length() : "partial read [" + read @@ -946,14 +939,14 @@ private void fillGaps(Executor executor, RangeMissingHandler writer, List gap.onProgress(start + progress) ); writeCount.increment(); @@ -1065,8 +1058,8 @@ private int readSingleRegion( fileRegion.populateAndRead( mapSubRangeToRegion(rangeToWrite, region), mapSubRangeToRegion(rangeToRead, region), - readerWithOffset(reader, fileRegion, rangeToRead.start() - regionStart), - writerWithOffset(writer, fileRegion, rangeToWrite.start() - regionStart), + readerWithOffset(reader, fileRegion, Math.toIntExact(rangeToRead.start() - regionStart)), + writerWithOffset(writer, fileRegion, Math.toIntExact(rangeToWrite.start() - regionStart)), ioExecutor, readFuture ); @@ -1095,8 +1088,8 @@ private int readMultiRegions( fileRegion.populateAndRead( mapSubRangeToRegion(rangeToWrite, region), subRangeToRead, - readerWithOffset(reader, fileRegion, rangeToRead.start() - regionStart), - writerWithOffset(writer, fileRegion, rangeToWrite.start() - regionStart), + readerWithOffset(reader, fileRegion, Math.toIntExact(rangeToRead.start() - regionStart)), + writerWithOffset(writer, fileRegion, Math.toIntExact(rangeToWrite.start() - regionStart)), ioExecutor, listeners.acquire(i -> bytesRead.updateAndGet(j -> Math.addExact(i, j))) ); @@ -1106,7 +1099,7 @@ private int readMultiRegions( return bytesRead.get(); } - private RangeMissingHandler writerWithOffset(RangeMissingHandler writer, CacheFileRegion fileRegion, long writeOffset) { + private RangeMissingHandler writerWithOffset(RangeMissingHandler writer, CacheFileRegion fileRegion, int writeOffset) { final RangeMissingHandler adjustedWriter; if (writeOffset == 0) { // no need to allocate a new capturing lambda if the offset isn't adjusted @@ -1129,7 +1122,7 @@ private RangeMissingHandler writerWithOffset(RangeMissingHandler writer, CacheFi return adjustedWriter; } - private RangeAvailableHandler readerWithOffset(RangeAvailableHandler reader, CacheFileRegion fileRegion, long readOffset) { + private RangeAvailableHandler readerWithOffset(RangeAvailableHandler reader, CacheFileRegion fileRegion, int readOffset) { final RangeAvailableHandler adjustedReader = (channel, channelPos, relativePos, len) -> reader.onRangeAvailable( channel, channelPos, @@ -1145,9 +1138,9 @@ private RangeAvailableHandler readerWithOffset(RangeAvailableHandler reader, Cac return adjustedReader; } 
- private boolean assertValidRegionAndLength(CacheFileRegion fileRegion, long channelPos, long len) { + private boolean assertValidRegionAndLength(CacheFileRegion fileRegion, int channelPos, int len) { assert regionOwners.get(fileRegion.io) == fileRegion; - assert channelPos >= fileRegion.physicalStartOffset() && channelPos + len <= fileRegion.physicalEndOffset(); + assert channelPos >= 0 && channelPos + len <= regionSize; return true; } @@ -1165,12 +1158,12 @@ public CacheFile getCacheFile(KeyType cacheKey, long length) { public interface RangeAvailableHandler { // caller that wants to read from x should instead do a positional read from x + relativePos // caller should also only read up to length, further bytes will be offered by another call to this method - int onRangeAvailable(SharedBytes.IO channel, long channelPos, long relativePos, long length) throws IOException; + int onRangeAvailable(SharedBytes.IO channel, int channelPos, int relativePos, int length) throws IOException; } @FunctionalInterface public interface RangeMissingHandler { - void fillCacheRange(SharedBytes.IO channel, long channelPos, long relativePos, long length, LongConsumer progressUpdater) + void fillCacheRange(SharedBytes.IO channel, int channelPos, int relativePos, int length, IntConsumer progressUpdater) throws IOException; } diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBytes.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBytes.java index 03c423805558d..04347aaf6bff2 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBytes.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBytes.java @@ -28,7 +28,6 @@ import java.nio.file.Path; import java.nio.file.StandardOpenOption; import java.util.function.IntConsumer; -import java.util.function.LongConsumer; public class SharedBytes extends AbstractRefCounted { @@ -58,7 +57,7 @@ public class SharedBytes extends AbstractRefCounted { private final IO[] ios; - final long regionSize; + final int regionSize; // TODO: for systems like Windows without true p-write/read support we should split this up into multiple channels since positional // operations in #IO are not contention-free there (https://bugs.java.com/bugdatabase/view_bug.do?bug_id=6265734) @@ -70,11 +69,11 @@ public class SharedBytes extends AbstractRefCounted { private final boolean mmap; - SharedBytes(int numRegions, long regionSize, NodeEnvironment environment, IntConsumer writeBytes, IntConsumer readBytes, boolean mmap) + SharedBytes(int numRegions, int regionSize, NodeEnvironment environment, IntConsumer writeBytes, IntConsumer readBytes, boolean mmap) throws IOException { this.numRegions = numRegions; this.regionSize = regionSize; - final long fileSize = numRegions * regionSize; + final long fileSize = (long) numRegions * regionSize; Path cacheFile = null; if (fileSize > 0) { cacheFile = findCacheSnapshotCacheFilePath(environment, fileSize); @@ -92,7 +91,7 @@ public class SharedBytes extends AbstractRefCounted { this.ios = new IO[numRegions]; if (mmap && fileSize > 0) { int regionsPerMmap = Math.toIntExact(MAX_BYTES_PER_MAP / regionSize); - int mapSize = Math.toIntExact(regionsPerMmap * regionSize); + int mapSize = regionsPerMmap * regionSize; int lastMapSize = Math.toIntExact(fileSize % mapSize); int mapCount = Math.toIntExact(fileSize / mapSize) + (lastMapSize == 0 ? 
0 : 1); MappedByteBuffer[] mmaps = new MappedByteBuffer[mapCount]; @@ -105,10 +104,7 @@ public class SharedBytes extends AbstractRefCounted { lastMapSize == 0 ? mapSize : lastMapSize ); for (int i = 0; i < numRegions; i++) { - ios[i] = new IO( - i, - mmaps[i / regionsPerMmap].slice(Math.toIntExact((i % regionsPerMmap) * regionSize), Math.toIntExact(regionSize)) - ); + ios[i] = new IO(i, mmaps[i / regionsPerMmap].slice((i % regionsPerMmap) * regionSize, regionSize)); } } else { for (int i = 0; i < numRegions; i++) { @@ -157,21 +153,20 @@ public static Path findCacheSnapshotCacheFilePath(NodeEnvironment environment, l public static void copyToCacheFileAligned( IO fc, InputStream input, - long fileChannelPos, - long relativePos, - long length, - LongConsumer progressUpdater, + int fileChannelPos, + int relativePos, + int length, + IntConsumer progressUpdater, ByteBuffer buf ) throws IOException { - long bytesCopied = 0L; + int bytesCopied = 0; long remaining = length; while (remaining > 0L) { final int bytesRead = BlobCacheUtils.readSafe(input, buf, relativePos, remaining); if (buf.hasRemaining()) { break; } - long bytesWritten = positionalWrite(fc, fileChannelPos + bytesCopied, buf); - bytesCopied += bytesWritten; + bytesCopied += positionalWrite(fc, fileChannelPos + bytesCopied, buf); progressUpdater.accept(bytesCopied); remaining -= bytesRead; } @@ -180,15 +175,14 @@ public static void copyToCacheFileAligned( final int remainder = buf.position() % PAGE_SIZE; final int adjustment = remainder == 0 ? 0 : PAGE_SIZE - remainder; buf.position(buf.position() + adjustment); - long bytesWritten = positionalWrite(fc, fileChannelPos + bytesCopied, buf); - bytesCopied += bytesWritten; - final long adjustedBytesCopied = bytesCopied - adjustment; // adjust to not break RangeFileTracker + bytesCopied += positionalWrite(fc, fileChannelPos + bytesCopied, buf); + final int adjustedBytesCopied = bytesCopied - adjustment; // adjust to not break RangeFileTracker assert adjustedBytesCopied == length : adjustedBytesCopied + " vs " + length; progressUpdater.accept(adjustedBytesCopied); } } - private static int positionalWrite(IO fc, long start, ByteBuffer byteBuffer) throws IOException { + private static int positionalWrite(IO fc, int start, ByteBuffer byteBuffer) throws IOException { byteBuffer.flip(); int written = fc.write(byteBuffer, start); assert byteBuffer.hasRemaining() == false; @@ -207,18 +201,13 @@ private static int positionalWrite(IO fc, long start, ByteBuffer byteBuffer) thr * @return number of bytes read * @throws IOException on failure */ - public static int readCacheFile( - final IO fc, - long channelPos, - long relativePos, - long length, - final ByteBufferReference byteBufferReference - ) throws IOException { + public static int readCacheFile(final IO fc, int channelPos, int relativePos, int length, final ByteBufferReference byteBufferReference) + throws IOException { if (length == 0L) { return 0; } final int bytesRead; - final ByteBuffer dup = byteBufferReference.tryAcquire(Math.toIntExact(relativePos), Math.toIntExact(length)); + final ByteBuffer dup = byteBufferReference.tryAcquire(relativePos, length); if (dup != null) { try { bytesRead = fc.read(dup, channelPos); @@ -230,7 +219,7 @@ public static int readCacheFile( } } else { // return fake response - return Math.toIntExact(length); + return length; } return bytesRead; } @@ -256,50 +245,49 @@ public final class IO { private final MappedByteBuffer mappedByteBuffer; private IO(final int sharedBytesPos, MappedByteBuffer mappedByteBuffer) { - 
long physicalOffset = sharedBytesPos * regionSize; - assert physicalOffset <= numRegions * regionSize; + long physicalOffset = (long) sharedBytesPos * regionSize; + assert physicalOffset <= (long) numRegions * regionSize; this.pageStart = physicalOffset; this.mappedByteBuffer = mappedByteBuffer; } - public long pageStart() { - return pageStart; - } - @SuppressForbidden(reason = "Use positional reads on purpose") - public int read(ByteBuffer dst, long position) throws IOException { - checkOffsets(position, dst.remaining()); + public int read(ByteBuffer dst, int position) throws IOException { + int remaining = dst.remaining(); + checkOffsets(position, remaining); final int bytesRead; if (mmap) { - bytesRead = dst.remaining(); + bytesRead = remaining; int startPosition = dst.position(); - dst.put(startPosition, mappedByteBuffer, Math.toIntExact(position - pageStart), bytesRead) - .position(startPosition + bytesRead); + dst.put(startPosition, mappedByteBuffer, position, bytesRead).position(startPosition + bytesRead); } else { - bytesRead = fileChannel.read(dst, position); + bytesRead = fileChannel.read(dst, pageStart + position); } readBytes.accept(bytesRead); return bytesRead; } @SuppressForbidden(reason = "Use positional writes on purpose") - public int write(ByteBuffer src, long position) throws IOException { + public int write(ByteBuffer src, int position) throws IOException { // check if writes are page size aligned for optimal performance assert position % PAGE_SIZE == 0; assert src.remaining() % PAGE_SIZE == 0; checkOffsets(position, src.remaining()); - int bytesWritten = fileChannel.write(src, position); + int bytesWritten = fileChannel.write(src, pageStart + position); writeBytes.accept(bytesWritten); return bytesWritten; } - private void checkOffsets(long position, long length) { - long pageEnd = pageStart + regionSize; - if (position < pageStart || position > pageEnd || position + length > pageEnd) { - assert false; - throw new IllegalArgumentException("bad access"); + private void checkOffsets(int position, int length) { + if (position < 0 || position + length > regionSize) { + offsetCheckFailed(); } } + + private static void offsetCheckFailed() { + assert false; + throw new IllegalArgumentException("bad access"); + } } } diff --git a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java index aa15adecead9b..5f7d2b79f994c 100644 --- a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java +++ b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.blobcache.shared; import org.apache.lucene.store.AlreadyClosedException; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.blobcache.common.ByteRange; import org.elasticsearch.cluster.node.DiscoveryNodeRole; @@ -35,7 +36,6 @@ import java.util.Objects; import java.util.Set; import java.util.concurrent.BrokenBarrierException; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; @@ -336,6 +336,7 @@ public void testGetMultiThreaded() throws IOException { } } + @AwaitsFix(bugUrl = 
"https://github.com/elastic/elasticsearch/issues/99217") public void testFetchFullCacheEntry() throws Exception { Settings settings = Settings.builder() .put(NODE_NAME_SETTING.getKey(), "node") @@ -370,15 +371,14 @@ public void execute(Runnable command) { final var cacheKey = generateCacheKey(); assertEquals(5, cacheService.freeRegionCount()); AtomicLong bytesRead = new AtomicLong(size(250)); - CountDownLatch latch = new CountDownLatch(1); + final PlainActionFuture future = PlainActionFuture.newFuture(); cacheService.maybeFetchFullEntry(cacheKey, size(250), (channel, channelPos, relativePos, length, progressUpdater) -> { progressUpdater.accept(length); - if (bytesRead.addAndGet(-length) == 0) { - latch.countDown(); - } - }); + bytesRead.addAndGet(-length); + }, future); - assertTrue(latch.await(10, TimeUnit.SECONDS)); + future.get(10, TimeUnit.SECONDS); + assertEquals(0L, bytesRead.get()); assertEquals(2, cacheService.freeRegionCount()); assertEquals(3, bulkTaskCount.get()); } @@ -388,7 +388,7 @@ public void execute(Runnable command) { assertEquals(2, cacheService.freeRegionCount()); var configured = cacheService.maybeFetchFullEntry(cacheKey, size(500), (ch, chPos, relPos, len, update) -> { throw new AssertionError("Should never reach here"); - }); + }, ActionListener.noop()); assertFalse(configured); assertEquals(2, cacheService.freeRegionCount()); } diff --git a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBytesTests.java b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBytesTests.java index 24625a91d0975..fa7ec6dbfd5a8 100644 --- a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBytesTests.java +++ b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBytesTests.java @@ -29,7 +29,7 @@ public void testReleasesFileCorrectly() throws Exception { try (var nodeEnv = new NodeEnvironment(nodeSettings, TestEnvironment.newEnvironment(nodeSettings))) { final SharedBytes sharedBytes = new SharedBytes( regions, - randomIntBetween(1, 16) * 4096L, + randomIntBetween(1, 16) * 4096, nodeEnv, ignored -> {}, ignored -> {}, diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java index 752aab933fb81..ac396948abcf7 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java @@ -143,7 +143,7 @@ public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, E public static final String CCR_CUSTOM_METADATA_REMOTE_CLUSTER_NAME_KEY = "remote_cluster_name"; public static final String REQUESTED_OPS_MISSING_METADATA_KEY = "es.requested_operations_missing"; - public static final TransportVersion TRANSPORT_VERSION_ACTION_WITH_SHARD_ID = TransportVersion.V_8_500_010; + public static final TransportVersion TRANSPORT_VERSION_ACTION_WITH_SHARD_ID = TransportVersion.V_8_500_020; private final boolean enabled; private final Settings settings; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java index e077334b20be6..c78e42d3c2113 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java @@ -47,7 +47,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_8_500_010; + return TransportVersion.V_8_500_020; } @Override @@ -111,7 +111,7 @@ public LifecycleStats( } public static LifecycleStats read(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { return new LifecycleStats(in.readVLong(), in.readVLong(), in.readVLong(), in.readDouble(), in.readBoolean()); } else { return INITIAL; @@ -120,7 +120,7 @@ public static LifecycleStats read(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { out.writeVLong(dataStreamsWithLifecyclesCount); out.writeVLong(minRetentionMillis); out.writeVLong(maxRetentionMillis); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java index afc511ef16d40..54b9fe7d76a85 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java @@ -77,16 +77,16 @@ static class ScheduledRunnable { // with wrapping the command in RunOnce we ensure the command isn't executed twice, e.g. if the // future is already running and cancel returns true this.command = new RunOnce(command); - this.scheduled = threadPool.schedule(command::run, delay, threadPool.generic()); + this.scheduled = threadPool.schedule(command, delay, threadPool.generic()); } public void reschedule(TimeValue delay) { // note: cancel return true if the runnable is currently executing if (scheduled.cancel()) { if (delay.duration() > 0) { - scheduled = threadPool.schedule(command::run, delay, threadPool.generic()); + scheduled = threadPool.schedule(command, delay, threadPool.generic()); } else { - threadPool.generic().execute(command::run); + threadPool.generic().execute(command); } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java index ff85727ef0c62..007261d5321c9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java @@ -79,7 +79,7 @@ public Request(StreamInput in) throws IOException { advanceTime = in.readOptionalString(); skipTime = in.readOptionalString(); waitForNormalization = in.readBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_012)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { refreshRequired = in.readBoolean(); } } @@ -93,7 +93,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(advanceTime); out.writeOptionalString(skipTime); out.writeBoolean(waitForNormalization); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_012)) { + if 
(out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { out.writeBoolean(refreshRequired); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java index 0f4ba65765714..8c8c87696c39c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java @@ -86,7 +86,7 @@ public Request(StreamInput in) throws IOException { } else { this.merges = List.of(); } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { this.scores = in.readCollectionAsList(StreamInput::readDouble); } else { this.scores = List.of(); @@ -136,7 +136,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { out.writeStringCollection(merges); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { out.writeCollection(scores, StreamOutput::writeDouble); } if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_043)) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java index 06def9204686e..48d2cf5bbd9ef 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java @@ -66,7 +66,7 @@ public FlushAcknowledgement(String id, Instant lastFinalizedBucketEnd, Boolean r public FlushAcknowledgement(StreamInput in) throws IOException { id = in.readString(); lastFinalizedBucketEnd = in.readOptionalInstant(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_012)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { refreshRequired = in.readBoolean(); } else { refreshRequired = true; @@ -77,7 +77,7 @@ public FlushAcknowledgement(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { out.writeString(id); out.writeOptionalInstant(lastFinalizedBucketEnd); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_012)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { out.writeBoolean(refreshRequired); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncStatusResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncStatusResponse.java index 4d544b6bceb38..b8d0e8a22aff5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncStatusResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncStatusResponse.java @@ -136,7 +136,7 @@ public AsyncStatusResponse(StreamInput in) throws IOException { this.skippedShards = in.readVInt(); this.failedShards = in.readVInt(); this.completionStatus = (this.isRunning == false) ? 
RestStatus.readFrom(in) : null; - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { this.clusters = in.readOptionalWriteable(SearchResponse.Clusters::new); } else { this.clusters = null; @@ -162,7 +162,7 @@ public void writeTo(StreamOutput out) throws IOException { if (isRunning == false) { RestStatus.writeTo(out, completionStatus); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { // optional since only CCS uses is; it is null for local-only searches out.writeOptionalWriteable(clusters); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java index d80f82d0942c3..b539b74e959bb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java @@ -45,7 +45,7 @@ */ public final class ApiKey implements ToXContentObject, Writeable { - public static final TransportVersion CROSS_CLUSTER_KEY_VERSION = TransportVersion.V_8_500_010; + public static final TransportVersion CROSS_CLUSTER_KEY_VERSION = TransportVersion.V_8_500_020; public enum Type { /** diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java index 977031ebf28d3..8748d464fc8a4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java @@ -53,7 +53,7 @@ */ public class RoleDescriptor implements ToXContentObject, Writeable { - public static final TransportVersion WORKFLOWS_RESTRICTION_VERSION = TransportVersion.V_8_500_010; + public static final TransportVersion WORKFLOWS_RESTRICTION_VERSION = TransportVersion.V_8_500_020; public static final String ROLE_TYPE = "role"; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java index f26152d66045d..68004c264f6ed 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java @@ -389,6 +389,7 @@ public void testGetPersistableSafeSecurityHeaders() throws IOException { final ClusterState clusterState = mock(ClusterState.class); final DiscoveryNodes discoveryNodes = mock(DiscoveryNodes.class); when(clusterState.nodes()).thenReturn(discoveryNodes); + when(clusterState.getMinTransportVersion()).thenReturn(TransportVersion.MINIMUM_COMPATIBLE); // No security header ThreadContext threadContext = new ThreadContext(Settings.EMPTY); final String nonSecurityHeaderKey = "not-a-security-header"; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FlushJobActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FlushJobActionRequestTests.java index be82f11b00260..9ee5e7e00c727 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FlushJobActionRequestTests.java 
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FlushJobActionRequestTests.java @@ -52,7 +52,7 @@ protected Writeable.Reader<Request> instanceReader() { @Override protected Request mutateInstanceForVersion(Request instance, TransportVersion version) { - if (version.before(TransportVersion.V_8_500_012)) { + if (version.before(TransportVersion.V_8_500_020)) { instance.setRefreshRequired(true); } return instance; diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleIT.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleIT.java index 0c947bf22133b..faa67479cc0d5 100644 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleIT.java +++ b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleIT.java @@ -107,6 +107,7 @@ public void testDownsampling() throws Exception { assertThat(writeIndex, backingIndexEqualTo(dataStreamName, 2)); // the last downsampling round must remain in the data stream assertThat(dsBackingIndices.get(0), is(tenSecondsDownsampleIndex)); + assertThat(indexExists(oneSecondDownsampleIndex), is(false)); }, 30, TimeUnit.SECONDS); } diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/ILMDownsampleDisruptionIT.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/ILMDownsampleDisruptionIT.java index d4e0cb7c8e5b9..4f33ae0966abf 100644 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/ILMDownsampleDisruptionIT.java +++ b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/ILMDownsampleDisruptionIT.java @@ -210,8 +210,14 @@ private void startDownsampleTaskViaIlm( var request = new UpdateSettingsRequest(sourceIndex).settings( Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, POLICY_NAME) ); + // Updating the index.lifecycle.name setting may fail due to the rolling restart itself, + // we need to attempt it in an assertBusy(...) 
assertBusy(() -> { try { + if (indexExists(sourceIndex) == false) { + logger.info("The source index [{}] no longer exists, downsampling likely completed", sourceIndex); + return; + } client().admin().indices().updateSettings(request).actionGet(TimeValue.timeValueSeconds(10)); } catch (Exception e) { logger.warn(() -> format("encountered failure while updating [%s] index's ilm policy", sourceIndex), e); diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/80_text.yml b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/80_text.yml index 7f8b5d5495195..4103bee7e290f 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/80_text.yml +++ b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/80_text.yml @@ -320,3 +320,89 @@ setup: - match: { values.0: [ 1 ] } - match: { values.0: [ 1 ] } +--- +"text with synthetic source": + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/99183" + + - do: + indices.create: + index: test2 + body: + mappings: + _source: + mode: synthetic + properties: + "emp_no": + type: long + name: + type: keyword + job: + type: text + fields: + raw: + type: keyword + + - do: + bulk: + index: test2 + refresh: true + body: + - { "index": { } } + - { "emp_no": 10, "name": "Jenny", "job": "IT Director" } + - { "index": { } } + - { "emp_no": 20, "name": "John", "job": "Payroll Specialist" } + + - do: + esql.query: + body: + query: 'from test2 | sort emp_no | keep job' + + - match: { columns.0.name: "job" } + - match: { columns.0.type: "text" } + + - length: { values: 2 } + - match: { values.0.0: "IT Director" } + - match: { values.1.0: "Payroll Specialist" } + + +--- +"stored text with synthetic source": + - do: + indices.create: + index: test2 + body: + mappings: + _source: + mode: synthetic + properties: + "emp_no": + type: long + name: + type: keyword + job: + type: text + store: true + + - do: + bulk: + index: test2 + refresh: true + body: + - { "index": { } } + - { "emp_no": 10, "name": "Jenny", "job": "IT Director"} + - { "index": { } } + - { "emp_no": 20, "name": "John", "job": "Payroll Specialist" } + + - do: + esql.query: + body: + query: 'from test2 | sort emp_no | keep job' + + - match: { columns.0.name: "job" } + - match: { columns.0.type: "text" } + + - length: { values: 2 } + - match: { values.0.0: "IT Director" } + - match: { values.1.0: "Payroll Specialist" } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec index 85b47891e3c41..3ecb31722277c 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec @@ -212,7 +212,7 @@ autoBucket FROM employees | WHERE hire_date >= "1985-01-01T00:00:00Z" AND hire_date < "1986-01-01T00:00:00Z" | EVAL bh = auto_bucket(height, 20, 1.41, 2.10) -| SORT hire_date +| SORT hire_date, height | KEEP hire_date, height, bh ; @@ -225,8 +225,8 @@ hire_date:date | height:double | bh:double 1985-10-14T00:00:00.000Z | 1.77 | 1.75 1985-10-20T00:00:00.000Z | 1.94 | 1.9000000000000001 1985-11-19T00:00:00.000Z | 1.8 | 1.8 -1985-11-20T00:00:00.000Z | 1.99 | 1.9500000000000002 1985-11-20T00:00:00.000Z | 1.93 | 1.9000000000000001 +1985-11-20T00:00:00.000Z | 1.99 | 1.9500000000000002 1985-11-21T00:00:00.000Z | 2.08 | 2.0500000000000003 ; 
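The csv-spec changes around here all fix the same flakiness: sorting only on hire_date leaves the relative order of the two 1985-11-20 rows unspecified, so each query gains a second sort key and the expected rows are reordered to match. A small self-contained illustration of the tie-breaking idea (Row and the sample values are invented for the demo, not read from the employees fixture):

    import java.util.Comparator;
    import java.util.List;

    public class TieBreakDemo {
        record Row(String hireDate, double height) {}

        public static void main(String[] args) {
            List<Row> rows = List.of(
                new Row("1985-11-20", 1.99),
                new Row("1985-11-20", 1.93),
                new Row("1985-11-21", 2.08)
            );

            // Sorting by hireDate alone leaves the two 1985-11-20 rows in
            // whatever order the engine happens to emit them.
            var ambiguous = rows.stream()
                .sorted(Comparator.comparing(Row::hireDate))
                .toList();

            // Adding height as a secondary key pins the order down, mirroring
            // "SORT hire_date, height" in the spec file.
            var deterministic = rows.stream()
                .sorted(Comparator.comparing(Row::hireDate).thenComparingDouble(Row::height))
                .toList();

            System.out.println(ambiguous);
            System.out.println(deterministic);
        }
    }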
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec index 4114317a2da62..daef21b57f1db 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec @@ -363,7 +363,7 @@ autoBucket FROM employees | WHERE hire_date >= "1985-01-01T00:00:00Z" AND hire_date < "1986-01-01T00:00:00Z" | EVAL bs = auto_bucket(salary, 20, 25324, 74999) -| SORT hire_date +| SORT hire_date, salary | KEEP hire_date, salary, bs // end::auto_bucket[] ; @@ -378,8 +378,8 @@ hire_date:date | salary:integer | bs:double 1985-10-14T00:00:00.000Z | 54329 | 50000.0 1985-10-20T00:00:00.000Z | 48735 | 45000.0 1985-11-19T00:00:00.000Z | 52833 | 50000.0 -1985-11-20T00:00:00.000Z | 74999 | 70000.0 1985-11-20T00:00:00.000Z | 33956 | 30000.0 +1985-11-20T00:00:00.000Z | 74999 | 70000.0 1985-11-21T00:00:00.000Z | 56371 | 55000.0 // end::auto_bucket-result[] ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec index 6adda6ee08a5d..3637081c3c4b6 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec @@ -266,10 +266,10 @@ height:double | languages.long:long | still_hired:boolean ; simpleEvalWithSortAndLimitOne -from employees | eval x = languages + 7 | sort x | limit 1; +from employees | eval x = languages + 7 | sort x, avg_worked_seconds | limit 1; avg_worked_seconds:long | birth_date:date | emp_no:integer | first_name:keyword | gender:keyword | height:double | height.float:double | height.half_float:double | height.scaled_float:double | hire_date:date | is_rehired:boolean | job_positions:keyword | languages:integer | languages.byte:integer | languages.long:long | languages.short:integer | last_name:keyword | salary:integer | salary_change:double | salary_change.int:integer | salary_change.keyword:keyword |salary_change.long:long | still_hired:boolean | x:integer -244294991 |1955-01-21T00:00:00.000Z|10005 |Kyoichi |M |2.05 |2.049999952316284|2.05078125 |2.05 |1989-09-12T00:00:00.000Z|[false, false, false, true]|null |1 |1 |1 |1 |Maliniak |63528 |[-2.14, 13.07] |[-2, 13] |[-2.14, 13.07] |[-2, 13] |true |8 +208374744 |1956-11-14T00:00:00.000Z|10033 |null |M |1.63 |1.6299999952316284|1.6298828125 |1.6300000000000001 |1987-03-18T00:00:00.000Z|true |null |1 |1 |1 |1 |Merlo |70011 |null |null |null |null |false |8 ; evalOfAverageValue diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index f4ec5e15a2c39..161c2c501ada0 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -324,53 +324,53 @@ c:long | gender:keyword | hire_year_str:keyword ; byLongAndLong -from employees | eval trunk_worked_seconds = avg_worked_seconds / 100000000 * 100000000 | stats c = count(languages.long) by languages.long, trunk_worked_seconds | sort c desc; +from employees | eval trunk_worked_seconds = avg_worked_seconds / 100000000 * 100000000 | stats c = count(languages.long) by languages.long, trunk_worked_seconds | sort c desc, languages.long, trunk_worked_seconds; -c:long | languages.long:long | trunk_worked_seconds:long -15 | 5 | 300000000 -11 | 2 | 300000000 -10 | 4 | 300000000 - 
9 | 3 | 200000000 - 8 | 2 | 200000000 - 8 | 4 | 200000000 - 8 | 3 | 300000000 - 8 | 1 | 200000000 - 7 | 1 | 300000000 - 6 | 5 | 200000000 +c:long | languages.long:long | trunk_worked_seconds:long +15 |5 |300000000 +11 |2 |300000000 +10 |4 |300000000 +9 |3 |200000000 +8 |1 |200000000 +8 |2 |200000000 +8 |3 |300000000 +8 |4 |200000000 +7 |1 |300000000 +6 |5 |200000000 ; byUnmentionedLongAndLong -from employees | eval trunk_worked_seconds = avg_worked_seconds / 100000000 * 100000000 | stats c = count(gender) by languages.long, trunk_worked_seconds | sort c desc; +from employees | eval trunk_worked_seconds = avg_worked_seconds / 100000000 * 100000000 | stats c = count(gender) by languages.long, trunk_worked_seconds | sort c desc, trunk_worked_seconds; c:long | languages.long:long | trunk_worked_seconds:long -13 | 5 | 300000000 -10 | 2 | 300000000 - 9 | 4 | 300000000 - 9 | 3 | 200000000 - 8 | 4 | 200000000 - 8 | 3 | 300000000 - 7 | 1 | 200000000 - 6 | 2 | 200000000 - 6 | 1 | 300000000 - 4 | 5 | 200000000 +13 |5 |300000000 +10 |2 |300000000 +9 |3 |200000000 +9 |4 |300000000 +8 |4 |200000000 +8 |3 |300000000 +7 |1 |200000000 +6 |2 |200000000 +6 |1 |300000000 +4 |5 |200000000 ; byUnmentionedIntAndLong -from employees | eval trunk_worked_seconds = avg_worked_seconds / 100000000 * 100000000 | stats c = count(gender) by languages, trunk_worked_seconds | sort c desc; +from employees | eval trunk_worked_seconds = avg_worked_seconds / 100000000 * 100000000 | stats c = count(gender) by languages, trunk_worked_seconds | sort c desc, trunk_worked_seconds; c:long | languages:integer | trunk_worked_seconds:long - 13 | 5 | 300000000 - 10 | 2 | 300000000 - 9 | 4 | 300000000 - 9 | 3 | 200000000 - 8 | 4 | 200000000 - 8 | 3 | 300000000 - 7 | 1 | 200000000 - 6 | 2 | 200000000 - 6 | null | 300000000 - 6 | 1 | 300000000 - 4 | null | 200000000 - 4 | 5 | 200000000 +13 |5 |300000000 +10 |2 |300000000 +9 |3 |200000000 +9 |4 |300000000 +8 |4 |200000000 +8 |3 |300000000 +7 |1 |200000000 +6 |2 |200000000 +6 |null |300000000 +6 |1 |300000000 +4 |null |200000000 +4 |5 |200000000 ; byUnmentionedIntAndBoolean @@ -474,8 +474,7 @@ min(salary):i | max(salary):i | c:l ; statsWithLiterals -from employees | limit 10 | eval x = 1 | stats c = count(x) -; +from employees | limit 10 | eval x = 1 | stats c = count(x); c:l 10 diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java index c1c86c007032e..b057dd8023031 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java @@ -107,7 +107,8 @@ public PhysicalPlan apply(PhysicalPlan plan) { aliases.put(attr, as.child()); attributes.remove(attr); } else { - if (aliases.containsKey(attr) == false) { + // skip synthetically added attributes (the ones from AVG), see LogicalPlanOptimizer.SubstituteSurrogates + if (attr.synthetic() == false && aliases.containsKey(attr) == false) { attributes.add(attr); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index ef3e24318c9a6..fa14501edc50d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -25,7 +25,6 
@@ import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -132,7 +131,7 @@ * * To log the results logResults() should return "true". */ -@TestLogging(value = "org.elasticsearch.xpack.esql:TRACE,org.elasticsearch.compute:TRACE", reason = "debug") +// @TestLogging(value = "org.elasticsearch.xpack.esql:TRACE,org.elasticsearch.compute:TRACE", reason = "debug") public class CsvTests extends ESTestCase { private static final Logger LOGGER = LogManager.getLogger(CsvTests.class); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java index 1a5aa81eea418..83d71d2cc3ed1 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java @@ -67,6 +67,7 @@ import org.elasticsearch.xpack.ql.expression.Attribute; import org.elasticsearch.xpack.ql.expression.Expressions; import org.elasticsearch.xpack.ql.expression.FieldAttribute; +import org.elasticsearch.xpack.ql.expression.Literal; import org.elasticsearch.xpack.ql.expression.MetadataAttribute; import org.elasticsearch.xpack.ql.expression.NamedExpression; import org.elasticsearch.xpack.ql.expression.Order; @@ -1784,6 +1785,49 @@ public void testProjectAllFieldsWhenOnlyTheCountMatters() { var source = source(eval.child()); } + /** + * ProjectExec[[a{r}#5]] + * \_EvalExec[[__a_SUM@81823521{r}#15 / __a_COUNT@31645621{r}#16 AS a]] + * \_LimitExec[10000[INTEGER]] + * \_AggregateExec[[],[SUM(salary{f}#11) AS __a_SUM@81823521, COUNT(salary{f}#11) AS __a_COUNT@31645621],FINAL,24] + * \_AggregateExec[[],[SUM(salary{f}#11) AS __a_SUM@81823521, COUNT(salary{f}#11) AS __a_COUNT@31645621],PARTIAL,16] + * \_LimitExec[10[INTEGER]] + * \_ExchangeExec[[],false] + * \_ProjectExec[[salary{f}#11]] + * \_FieldExtractExec[salary{f}#11] + * \_EsQueryExec[test], query[][_doc{f}#17], limit[10], sort[] estimatedRowSize[8] + */ + public void testAvgSurrogateFunctionAfterRenameAndLimit() { + var plan = optimizedPlan(physicalPlan(""" + from test + | limit 10 + | rename first_name as FN + | stats a = avg(salary) + """)); + + var project = as(plan, ProjectExec.class); + var eval = as(project.child(), EvalExec.class); + var limit = as(eval.child(), LimitExec.class); + assertThat(limit.limit(), instanceOf(Literal.class)); + assertThat(limit.limit().fold(), equalTo(10000)); + var aggFinal = as(limit.child(), AggregateExec.class); + assertThat(aggFinal.getMode(), equalTo(AggregateExec.Mode.FINAL)); + var aggPartial = as(aggFinal.child(), AggregateExec.class); + assertThat(aggPartial.getMode(), equalTo(AggregateExec.Mode.PARTIAL)); + limit = as(aggPartial.child(), LimitExec.class); + assertThat(limit.limit(), instanceOf(Literal.class)); + assertThat(limit.limit().fold(), equalTo(10)); + + var exchange = as(limit.child(), ExchangeExec.class); + project = as(exchange.child(), ProjectExec.class); + var expectedFields = List.of("salary"); + assertThat(Expressions.names(project.projections()), is(expectedFields)); + var fieldExtract = as(project.child(), FieldExtractExec.class); 
+ assertThat(Expressions.names(fieldExtract.attributesToExtract()), is(expectedFields)); + var source = source(fieldExtract.child()); + assertThat(source.limit().fold(), equalTo(10)); + } + private static EsQueryExec source(PhysicalPlan plan) { if (plan instanceof ExchangeExec exchange) { plan = exchange.child(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/Vocabulary.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/Vocabulary.java index 6613f8e494cf5..2ace57fc57614 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/Vocabulary.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/Vocabulary.java @@ -69,7 +69,7 @@ public Vocabulary(StreamInput in) throws IOException { } else { merges = List.of(); } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { scores = in.readCollectionAsList(StreamInput::readDouble); } else { scores = List.of(); @@ -95,7 +95,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { out.writeStringCollection(merges); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { out.writeCollection(scores, StreamOutput::writeDouble); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/TransportVersionUtilsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/TransportVersionUtilsTests.java index 020ee64a0328a..9fa0c9163da6e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/TransportVersionUtilsTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/TransportVersionUtilsTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.utils.TransportVersionUtils; @@ -19,15 +20,15 @@ public class TransportVersionUtilsTests extends ESTestCase { - private static final Map<String, TransportVersion> transportVersions = Map.of( + private static final Map<String, CompatibilityVersions> transportVersions = Map.of( "Alfredo", - TransportVersion.V_7_0_0, + new CompatibilityVersions(TransportVersion.V_7_0_0), "Bertram", - TransportVersion.V_7_0_1, + new CompatibilityVersions(TransportVersion.V_7_0_1), "Charles", - TransportVersion.V_8_500_010, + new CompatibilityVersions(TransportVersion.V_8_500_020), "Dominic", - TransportVersion.V_8_0_0 + new CompatibilityVersions(TransportVersion.V_8_0_0) ); private static final ClusterState state = new ClusterState( @@ -51,7 +52,7 @@ public void testGetMinTransportVersion() { public void testIsMinTransformVersionSameAsCurrent() { assertThat(TransportVersionUtils.isMinTransportVersionSameAsCurrent(state), equalTo(false)); - Map<String, TransportVersion> transportVersions1 = Map.of("Eugene", TransportVersion.current()); + Map<String, CompatibilityVersions> transportVersions1 = Map.of("Eugene", new CompatibilityVersions(TransportVersion.current())); ClusterState state1 = new ClusterState( new ClusterName("harry"), @@ -72,6 +73,6 @@ public void testIsMinTransformVersionSameAsCurrent() { public void testIsMinTransportVersionOnOrAfter() { assertThat(TransportVersionUtils.isMinTransportVersionOnOrAfter(state, 
TransportVersion.V_7_0_0), equalTo(true));
-        assertThat(TransportVersionUtils.isMinTransportVersionOnOrAfter(state, TransportVersion.V_8_500_010), equalTo(false));
+        assertThat(TransportVersionUtils.isMinTransportVersionOnOrAfter(state, TransportVersion.V_8_500_020), equalTo(false));
     }
 }
diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java
index 024a3051e2b19..40a04115173fc 100644
--- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java
+++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java
@@ -768,7 +768,8 @@ public void testToXContent() throws IOException {
                   "max_index_version":%s
                 }
               },
-              "transport_versions": []
+              "transport_versions": [],
+              "nodes_versions": []
             },
             "cluster_settings": {
               "cluster": {
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java
index 01e9c7587f8db..0aed4029a9836 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java
@@ -256,7 +256,7 @@ private static DiscoveryNode addAnotherPre8500DataNode(ClusterService clusterSer
             transportVersion = TransportVersion.V_8_8_1;
         } else {
             version = Version.V_8_9_0;
-            transportVersion = TransportVersion.V_8_500_015;
+            transportVersion = TransportVersion.V_8_500_020;
         }
         return addAnotherDataNodeWithVersion(clusterService, version, transportVersion);
     }
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/10_index_doc.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/10_index_doc.yml
index dab154917b17b..8a3de7cc4b855 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/10_index_doc.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/10_index_doc.yml
@@ -37,7 +37,7 @@ setup:
         settings:
           index:
             number_of_shards: 1
-            number_of_replicas: 0
+            number_of_replicas: 1
         mappings:
           properties:
             name:
@@ -50,7 +50,7 @@ setup:
         settings:
           index:
             number_of_shards: 1
-            number_of_replicas: 0
+            number_of_replicas: 1
         mappings:
           properties:
             name:
@@ -62,7 +62,7 @@ setup:
         settings:
           index:
             number_of_shards: 1
-            number_of_replicas: 0
+            number_of_replicas: 1
         mappings:
           properties:
             name:
@@ -75,7 +75,7 @@ setup:
         settings:
           index:
             number_of_shards: 1
-            number_of_replicas: 0
+            number_of_replicas: 1
         mappings:
           properties:
             name:
@@ -235,7 +235,6 @@ teardown:
   - do:
       headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user
       bulk:
-        refresh: true
         body:
           - '{"index": {"_index": "only_read", "_id": "13"}}'
           - '{"name": "doc13"}'
@@ -246,6 +245,10 @@
   - match: { items.0.index.error.type: "security_exception" }
   - match: { items.1.index.status: 201 }
 
+  - do: # superuser
+      indices.refresh:
+        index: only_index
+
   - do: # superuser
       search:
         rest_total_hits_as_int: true
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/11_delete_doc.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/11_delete_doc.yml
index 5c19aa3bbfcad..094dc6fa01097 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/11_delete_doc.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/11_delete_doc.yml
@@ -37,7 +37,7 @@ setup:
         settings:
           index:
             number_of_shards: 1
-            number_of_replicas: 0
+            number_of_replicas: 1
         mappings:
           properties:
             name:
@@ -50,7 +50,7 @@ setup:
         settings:
           index:
             number_of_shards: 1
-            number_of_replicas: 0
+            number_of_replicas: 1
         mappings:
           properties:
             name:
@@ -62,7 +62,7 @@ setup:
         settings:
           index:
             number_of_shards: 1
-            number_of_replicas: 0
+            number_of_replicas: 1
         mappings:
           properties:
             name:
@@ -75,7 +75,7 @@ setup:
         settings:
           index:
             number_of_shards: 1
-            number_of_replicas: 0
+            number_of_replicas: 1
         mappings:
           properties:
             name:
@@ -138,7 +138,6 @@ teardown:
   - do:
       headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user
       delete:
-        refresh: true
         index: only_delete
         id: "3"
 
@@ -152,7 +151,6 @@ teardown:
   - do:
       headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user
       bulk:
-        refresh: true
         body:
           - '{"delete": {"_index": "only_delete", "_id": "4"}}'
           - '{"delete": {"_index": "everything" , "_id": "9"}}'
@@ -160,7 +158,6 @@ teardown:
   - do:
       headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user
       bulk:
-        refresh: true
         body: # The rest test won't send streaming content unless it has multiple bodies, so we send the same delete twice
           - '{"delete": {"_index": "only_delete", "_id": "5"}}'
           - '{"delete": {"_index": "only_delete", "_id": "5"}}'
@@ -177,6 +174,10 @@ teardown:
         _index: everything
         _id: "10"
 
+  - do: # superuser
+      indices.refresh:
+        index: only_delete
+
   - do: # superuser
       search:
         rest_total_hits_as_int: true
@@ -197,7 +198,6 @@ teardown:
   - do:
       catch: forbidden
       headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user
       delete:
-        refresh: true
         index: only_read
         id: "1"
@@ -205,14 +205,12 @@ teardown:
   - do:
       catch: forbidden
       headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user
       delete:
-        refresh: true
         index: only_index
         id: "2"
   - do:
       headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user
       bulk:
-        refresh: true
         body:
           - '{"delete": {"_index": "only_read" , "_id": "1"}}'
           - '{"delete": {"_index": "only_index", "_id": "2"}}'
@@ -226,7 +224,6 @@ teardown:
   - do:
       headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user
       bulk:
-        refresh: true
         body: # The rest test won't send streaming content unless it has multiple bodies, so we send the same delete twice
           - '{"delete": {"_index": "only_read" , "_id": "1"}}'
           - '{"delete": {"_index": "only_read" , "_id": "1"}}'
@@ -240,7 +237,6 @@ teardown:
   - do:
       headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user
       bulk:
-        refresh: true
         body: # The rest test won't send streaming content unless it has multiple bodies, so we send the same delete twice
           - '{"delete": {"_index": "only_index", "_id": "2"}}'
           - '{"delete": {"_index": "only_index", "_id": "2"}}'
@@ -251,6 +247,10 @@ teardown:
   - match: { items.1.delete.status: 403 }
   - match: { items.1.delete.error.type: "security_exception" }
 
+  - do: # superuser
+      indices.refresh:
+        index: only_read,only_index
+
   - do: # superuser
       search:
         rest_total_hits_as_int: true
@@ -270,7 +270,6 @@ teardown:
   - do:
       headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user
       bulk:
-        refresh: true
         body:
           - '{"delete": {"_index": "only_read" , "_id": "1"}}'
           - '{"delete": {"_index": "only_delete", "_id": "6"}}'
@@ -279,6 +278,10 @@ teardown:
   - match: { items.0.delete.error.type: "security_exception" }
   - match: { items.1.delete.status: 200 }
 
+  - do: # superuser
+      indices.refresh:
+        index: only_read,only_delete
+
   - do: # superuser
       search:
         rest_total_hits_as_int: true
@@ -298,7 +301,6 @@ teardown:
   - do:
       headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user
       bulk:
-        refresh: true
         body:
           - '{"index" : {"_index": "only_delete", "_id": "11"}}'
           - '{"name" : "doc11"}'
@@ -314,6 +316,10 @@ teardown:
   - match: { items.3.delete.status: 403 }
   - match: { items.3.delete.error.type: "security_exception" }
 
+  - do: # superuser
+      indices.refresh:
+        index: only_delete,only_index
+
   - do: # superuser
       search:
         rest_total_hits_as_int: true
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/12_index_alias.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/12_index_alias.yml
index e1901ced2817e..8c0ba52f23236 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/12_index_alias.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/12_index_alias.yml
@@ -35,7 +35,7 @@ setup:
         settings:
           index:
             number_of_shards: 1
-            number_of_replicas: 0
+            number_of_replicas: 1
         mappings:
           properties:
             name:
@@ -48,7 +48,7 @@ setup:
         settings:
           index:
             number_of_shards: 1
-            number_of_replicas: 0
+            number_of_replicas: 1
         mappings:
           properties:
             name:
@@ -61,7 +61,7 @@ setup:
         settings:
           index:
             number_of_shards: 1
-            number_of_replicas: 0
+            number_of_replicas: 1
         mappings:
           properties:
             name:
@@ -147,7 +146,6 @@ teardown:
   - do:
       headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user
       bulk:
-        refresh: true
         body:
           - '{"index": {"_index": "can_write_1", "_id": "3"}}'
           - '{"name": "doc3"}'
@@ -157,7 +156,6 @@ teardown:
   - do:
       headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user
       bulk:
-        refresh: true
         body:
           - '{"index": {"_index": "can_write_1", "_id": "5"}}'
           - '{"name": "doc5"}'
@@ -167,7 +165,6 @@ teardown:
   - do:
       headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user
       bulk:
-        refresh: true
         body:
           - '{"index": {"_index": "can_write_1", "_id": "7"}}'
           - '{"name": "doc7"}'
@@ -176,6 +173,10 @@ teardown:
           - '{"index": {"_index": "can_write_3", "_id": "9"}}'
           - '{"name": "doc9"}'
 
+  - do: # superuser
+      indices.refresh:
+        index: write_index_*
+
   - do: # superuser
       search:
         rest_total_hits_as_int: true
@@ -194,7 +195,6 @@ teardown:
   - do:
       catch: forbidden
       headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user
      create:
-        refresh: true
        id: "7"
        index: can_read_1
        body: >
@@ -206,7 +206,6 @@ teardown:
   - do:
       catch: forbidden
       headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user
      create:
-        refresh: true
        id: "8"
        index: can_read_2
        body: >
@@ -217,7 +216,6 @@ teardown:
   - do:
       headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user
       bulk:
-        refresh: true
         body:
           - '{"index": {"_index": "can_read_1", "_id": "9"}}'
           - '{"name": "doc9"}'
@@ -232,7 +230,6 @@ teardown:
   - do:
       headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user
       bulk:
-        refresh: true
         body:
           - '{"index": {"_index": "can_read_1", "_id": "11"}}'
           - '{"name": "doc11"}'
@@ -244,6 +241,10 @@ teardown:
   - match: { items.1.index.status: 403 }
   - match: { items.1.index.error.type: "security_exception" }
 
+  - do: # superuser
+      indices.refresh:
+        index: read_index
+
   - do: # superuser
       search:
         rest_total_hits_as_int: true
@@ -255,7 +256,6 @@ teardown:
   - do:
       headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user
       bulk:
-        refresh: true
         body:
           - '{"index": {"_index": "can_read_1", "_id": "13"}}'
           - '{"name": "doc13"}'
@@ -266,6 +266,10 @@ teardown:
   - match: { items.0.index.error.type: "security_exception" }
   - match: { items.1.index.status: 201 }
 
+  - do: # superuser
+      indices.refresh:
+        index: write_index_1
+
   - do: # superuser
       search:
         rest_total_hits_as_int: true
@@ -276,7 +280,6 @@ teardown:
   - do:
       headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user
       bulk:
-        refresh: true
         body:
           - '{"index": {"_index": "can_read_1", "_id": "15"}}'
           - '{"name": "doc15"}'
@@ -297,6 +300,10 @@ teardown:
   - match: { items.3.index.status: 201 }
   - match: { items.4.index.status: 201 }
 
+  - do: # superuser
+      indices.refresh:
+        index: write_index_*
+
   - do: # superuser
       search:
         rest_total_hits_as_int: true
@@ -336,7 +343,6 @@ teardown:
   - do:
       headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user
       bulk:
-        refresh: true
         body:
           - '{"index": {"_index": "can_read_1", "_id": "20"}}'
           - '{"name": "doc20"}'
@@ -363,6 +369,10 @@ teardown:
   - match: { items.5.update.status: 200 }
   - match: { items.6.delete.status: 200 }
 
+  - do: # superuser
+      indices.refresh:
+        index: write_index_*
+
   - do: # superuser
       search:
         rest_total_hits_as_int: true
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/14_cat_indices.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/14_cat_indices.yml
index 34bb3f58d1901..978cf84983190 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/14_cat_indices.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/14_cat_indices.yml
@@ -164,7 +164,7 @@ teardown:
 
   - match:
       $body: |
-        /^(yellow \s+
+        /^((yellow|green) \s+
            close   \s+
            index_to_monitor \s+
            ([a-zA-Z0-9=/_+]|[\\\-]){22} \s+
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/20_get_doc.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/20_get_doc.yml
index 6e22bb4b8b43e..771920c4b13f4 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/20_get_doc.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/20_get_doc.yml
@@ -38,7 +38,7 @@ setup:
         settings:
           index:
             number_of_shards: 1
-            number_of_replicas: 0
+            number_of_replicas: 1
         mappings:
           properties:
             name:
@@ -51,7 +51,7 @@ setup:
         settings:
           index:
             number_of_shards: 1
-            number_of_replicas: 0
+            number_of_replicas: 1
         mappings:
           properties:
             name:
@@ -63,7 +63,7 @@ setup:
         settings:
           index:
             number_of_shards: 1
-            number_of_replicas: 0
+            number_of_replicas: 1
         mappings:
           properties:
             name:
@@ -75,7 +75,7 @@ setup:
         settings:
           index:
             number_of_shards: 1
-            number_of_replicas: 0
+            number_of_replicas: 1
         mappings:
           properties:
             name:
@@ -87,7 +87,7 @@ setup:
         settings:
           index:
             number_of_shards: 1
-            number_of_replicas: 0
+            number_of_replicas: 1
         mappings:
           properties:
             name:
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/21_search_doc.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/21_search_doc.yml
index 7bba2a7617a16..56ade8918efe4 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/21_search_doc.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/21_search_doc.yml
@@ -38,7 +38,7 @@ setup:
         settings:
           index:
             number_of_shards: 1
-            number_of_replicas: 0
+            number_of_replicas: 1
         mappings:
           properties:
             name:
@@ -52,7 +52,7 @@ setup:
         settings:
           index:
             number_of_shards: 1
-            number_of_replicas: 0
+            number_of_replicas: 1
         mappings:
           properties:
             name:
@@ -66,7 +66,7 @@ setup:
         settings:
           index:
             number_of_shards: 1
-            number_of_replicas: 0
+            number_of_replicas: 1
         mappings:
           properties:
             name:
@@ -80,7 +80,7 @@ setup:
         settings:
           index:
             number_of_shards: 1
-            number_of_replicas: 0
+            number_of_replicas: 1
         mappings:
           properties:
             name:
@@ -94,7 +94,7 @@ setup:
         settings:
           index:
             number_of_shards: 1
-            number_of_replicas: 0
+            number_of_replicas: 1
         mappings:
           properties:
             name:
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/30_dynamic_put_mapping.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/30_dynamic_put_mapping.yml
index 7b50942478751..921486ba2d220 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/30_dynamic_put_mapping.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/30_dynamic_put_mapping.yml
@@ -34,7 +34,7 @@ setup:
         settings:
           index:
             number_of_shards: 1
-            number_of_replicas: 0
+            number_of_replicas: 1
 
   - do:
       indices.put_alias:
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/31_rollover_using_alias.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/31_rollover_using_alias.yml
index 73979883291c3..6d3a014f8b97b 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/31_rollover_using_alias.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/31_rollover_using_alias.yml
@@ -35,7 +35,7 @@ setup:
         settings:
           index:
             number_of_shards: 1
-            number_of_replicas: 0
+            number_of_replicas: 1
 
   - do:
       indices.put_alias:
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/70_tsdb.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/70_tsdb.yml
index f9a6147533e8e..8044e9bc3b8ab 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/70_tsdb.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/70_tsdb.yml
@@ -232,7 +232,6 @@ create_doc permission can create:
   - do:
       headers: { Authorization: "Basic bGltaXRlZDp4LXBhY2stdGVzdC1wYXNzd29yZA==" } # limited - user
       bulk:
-        refresh: true
         index: test
         body:
           - '{"create": {}}'
@@ -242,7 +241,6 @@ create_doc permission can create:
   - do:
       headers: { Authorization: "Basic bGltaXRlZDp4LXBhY2stdGVzdC1wYXNzd29yZA==" } # limited - user
       index:
-        refresh: true
         index: test
         body:
           "@timestamp": "2021-04-28T23:51:03.142Z"
@@ -257,6 +255,16 @@ create_doc permission can create:
             rx: 430605511
   - match: { _version: 1 }
 
+  - do: # superuser
+      indices.refresh:
+        index: test
+  - do:
+      search:
+        index: test
+  # Original 8 docs ("setup") + 2 more (above)
+  - match: { hits.total.value: 10 }
+  - match: { hits.total.relation: "eq" }
+
 ---
 create_doc permission can't overwrite:
   - skip:
@@ -290,7 +298,6 @@ create_doc permission can't overwrite:
   - do:
       headers: { Authorization: "Basic bGltaXRlZDp4LXBhY2stdGVzdC1wYXNzd29yZA==" } # limited - user
       bulk:
-        refresh: true
         index: test
         body:
           - '{"index": {}}'
@@ -301,7 +308,6 @@ create_doc permission can't overwrite:
       headers: { Authorization: "Basic bGltaXRlZDp4LXBhY2stdGVzdC1wYXNzd29yZA==" } # limited - user
       catch: "/is\ unauthorized\ for\ user\ \\[limited\\]/"
       index:
-        refresh: true
         index: test
         op_type: index
       body:
@@ -316,6 +322,16 @@ create_doc permission can't overwrite:
           tx: 111434595272
           rx: 430605511
 
+  - do: # superuser
+      indices.refresh:
+        index: test
+  - do:
+      search:
+        index: test
+  # Original 8 docs ("setup") but no more (above)
+  - match: { hits.total.value: 8 }
+  - match: { hits.total.relation: "eq" }
+
 ---
 index permission can create:
   - skip:
@@ -349,7 +365,6 @@ index permission can create:
   - do:
       headers: { Authorization: "Basic bGltaXRlZDp4LXBhY2stdGVzdC1wYXNzd29yZA==" } # limited - user
       bulk:
-        refresh: true
         index: test
         body:
           - '{"create": {}}'
@@ -359,7 +374,6 @@ index permission can create:
   - do:
       headers: { Authorization: "Basic bGltaXRlZDp4LXBhY2stdGVzdC1wYXNzd29yZA==" } # limited - user
       index:
-        refresh: true
         index: test
         body:
           "@timestamp": "2021-04-28T23:51:03.142Z"
@@ -374,6 +388,16 @@ index permission can create:
             rx: 430605511
   - match: { _version: 1 }
 
+  - do: # superuser
+      indices.refresh:
+        index: test
+  - do:
+      search:
+        index: test
+  # Original 8 docs ("setup") + 2 more (above)
+  - match: { hits.total.value: 10 }
+  - match: { hits.total.relation: "eq" }
+
 ---
 index permission can overwrite:
   - skip:
@@ -407,7 +431,6 @@ index permission can overwrite:
   - do:
       headers: { Authorization: "Basic bGltaXRlZDp4LXBhY2stdGVzdC1wYXNzd29yZA==" } # limited - user
       bulk:
-        refresh: true
         index: test
         body:
           - '{"index": {}}'
@@ -417,7 +440,6 @@ index permission can overwrite:
       headers: { Authorization: "Basic bGltaXRlZDp4LXBhY2stdGVzdC1wYXNzd29yZA==" } # limited - user
       index:
-        refresh: true
         index: test
         op_type: index
       body:
@@ -432,3 +454,13 @@
           tx: 111434595272
           rx: 430605511
   - match: { _version: 2 }
+
+  - do: # superuser
+      indices.refresh:
+        index: test
+  - do:
+      search:
+        index: test
+  # Original 8 docs ("setup") even though 2 have been overwritten (above)
+  - match: { hits.total.value: 8 }
+  - match: { hits.total.relation: "eq" }