Search during index create #66853

Open · wants to merge 3 commits into base: main
docs/changelog/66853.yaml (6 additions, 0 deletions)
@@ -0,0 +1,6 @@
pr: 66853
summary: Search during index create
area: Search
type: enhancement
issues:
- 65846
SearchWhileInitializingEmptyIT.java (new file)
@@ -0,0 +1,127 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.search.basic;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.disruption.NetworkDisruption;
import org.elasticsearch.test.transport.MockTransportService;

import java.util.Collection;
import java.util.Collections;
import java.util.Set;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;


/**
* This integration test contains two tests to provoke a search failure while initializing a new empty index:
* <ul>
* <li>testSearchDuringCreate: just creates an index and searches concurrently; has a low likelihood of provoking the issue
* (but it does happen)</li>
* <li>testDelayIsolatedPrimary: delays network messages to the primary from all nodes except the search coordinator, ensuring that
* every test run hits the case where the primary of a newly created shard is still initializing</li>
* </ul>
*/
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST)
public class SearchWhileInitializingEmptyIT extends ESIntegTestCase {

@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Collections.singletonList(MockTransportService.TestPlugin.class);
}

public void testSearchDuringCreate() {
ActionFuture<CreateIndexResponse> createFuture = prepareCreate("test").execute();

for (int i = 0; i < 100; ++i) {
SearchResponse searchResponse = client().prepareSearch("test*").setAllowPartialSearchResults(randomBoolean()).get();
assertThat(searchResponse.getFailedShards(), equalTo(0));
assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L));
}

logger.info("done searching");
assertAcked(createFuture.actionGet());
}

public void testDelayIsolatedPrimary() throws Exception {
String[] originalNodes = internalCluster().getNodeNames();
String dataNode = internalCluster().startDataOnlyNode();
String coordinatorNode = internalCluster().startCoordinatingOnlyNode(Settings.EMPTY);

NetworkDisruption.Bridge bridge = new NetworkDisruption.Bridge(coordinatorNode, Set.of(dataNode), Set.of(originalNodes));
NetworkDisruption scheme =
new NetworkDisruption(bridge, new NetworkDisruption.NetworkDelay(NetworkDisruption.NetworkDelay.DEFAULT_DELAY_MIN));
setDisruptionScheme(scheme);
scheme.startDisrupting();

ActionFuture<CreateIndexResponse> createFuture;
try {
Settings.Builder builder = Settings.builder().put(indexSettings())
.put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + "._name", dataNode);

createFuture =
internalCluster().masterClient().admin().indices()
.create(new CreateIndexRequest("test", builder.build()).timeout(TimeValue.ZERO));

// wait until the coordinator node knows about the index
assertBusy(() -> {
try {
client(coordinatorNode).get(new GetRequest("test", "0")).actionGet();
throw new IllegalStateException("non-assertion exception to escape assertBusy, get request must fail");
} catch (IndexNotFoundException e) {
throw new AssertionError(e);
} catch (NoShardAvailableActionException e) {
// now coordinator knows about the index.
}
});

// the shard is not yet available on the data node.
ElasticsearchException exception = expectThrows(ElasticsearchException.class, () ->
client(dataNode).get(new GetRequest("test", "0")).actionGet());
assertThat(exception, anyOf(instanceOf(NoShardAvailableActionException.class), instanceOf(IndexNotFoundException.class)));

for (String indices : new String[] {"test*", "tes*", "test"}) {
logger.info("Searching for [{}]", indices);
SearchResponse searchResponse =
client(coordinatorNode).prepareSearch(indices).setAllowPartialSearchResults(randomBoolean()).get();
assertThat(searchResponse.getFailedShards(), equalTo(0));
assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L));
}
} finally {
internalCluster().clearDisruptionScheme(true);
}
createFuture.actionGet();
}
}
OperationRouting.java

@@ -105,7 +105,7 @@ public static ShardIterator getShards(ClusterState clusterState, ShardId shardId

private static final Map<String, Set<String>> EMPTY_ROUTING = Collections.emptyMap();

private Set<IndexShardRoutingTable> computeTargetedShards(ClusterState clusterState, String[] concreteIndices,
static Set<IndexShardRoutingTable> computeTargetedShards(ClusterState clusterState, String[] concreteIndices,
@Nullable Map<String, Set<String>> routing) {
routing = routing == null ? EMPTY_ROUTING : routing; // just use an empty map
final Set<IndexShardRoutingTable> set = new HashSet<>();
@@ -118,12 +118,20 @@ private Set<IndexShardRoutingTable> computeTargetedShards(ClusterState clusterSt
for (String r : effectiveRouting) {
final int routingPartitionSize = indexMetadata.getRoutingPartitionSize();
for (int partitionOffset = 0; partitionOffset < routingPartitionSize; partitionOffset++) {
set.add(RoutingTable.shardRoutingTable(indexRouting, calculateScaledShardId(indexMetadata, r, partitionOffset)));
IndexShardRoutingTable indexShard =
RoutingTable.shardRoutingTable(indexRouting, calculateScaledShardId(indexMetadata, r, partitionOffset));
if (indexShard.primary.active()
|| indexShard.primary.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED) {
set.add(indexShard);
}
}
}
} else {
for (IndexShardRoutingTable indexShard : indexRouting) {
set.add(indexShard);
if (indexShard.primary.active()
|| indexShard.primary.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED) {
set.add(indexShard);
}
}
}
}
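
Read on its own, the condition added to both branches above amounts to a single predicate over a shard group's primary routing entry. Below is a minimal sketch, assuming the same package-level access to IndexShardRoutingTable.primary that the patch itself uses; the helper name isSearchableDuringCreate is hypothetical and not part of this change:

// Hypothetical helper mirroring the condition introduced in computeTargetedShards above:
// a shard group is targeted unless its primary is still unassigned *because the index was
// only just created*. Primaries that are unassigned for any other reason are still added,
// exactly as before this change, so those searches can continue to report shard failures.
static boolean isSearchableDuringCreate(IndexShardRoutingTable indexShard) {
    return indexShard.primary.active()
        || indexShard.primary.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED;
}

In effect, a search that races with index creation simply does not see the new index's shards yet, which is why the tests in this PR expect zero hits and zero failed shards (integration test) and an empty targeted-shard set (unit test) rather than an error.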
@@ -201,15 +209,15 @@ private ShardIterator shardRoutings(IndexShardRoutingTable indexShard, Discovery
}
}

protected IndexRoutingTable indexRoutingTable(ClusterState clusterState, String index) {
protected static IndexRoutingTable indexRoutingTable(ClusterState clusterState, String index) {
IndexRoutingTable indexRouting = clusterState.routingTable().index(index);
if (indexRouting == null) {
throw new IndexNotFoundException(index);
}
return indexRouting;
}

protected IndexMetadata indexMetadata(ClusterState clusterState, String index) {
protected static IndexMetadata indexMetadata(ClusterState clusterState, String index) {
IndexMetadata indexMetadata = clusterState.metadata().index(index);
if (indexMetadata == null) {
throw new IndexNotFoundException(index);
OperationRoutingTests.java

@@ -20,8 +20,12 @@

import org.elasticsearch.Version;
import org.elasticsearch.action.support.replication.ClusterStateCreationUtils;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
@@ -43,11 +47,17 @@
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
import static org.hamcrest.object.HasToString.hasToString;

public class OperationRoutingTests extends ESTestCase{
@@ -606,4 +616,65 @@ public void testAdaptiveReplicaSelection() throws Exception {
terminate(threadPool);
}

public void testComputeTargetShards() {
DiscoveryNode targetNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), Version.CURRENT);
int shards = between(1, 5);
int replicas = between(0, 5);
IndexMetadata indexMetadata = IndexMetadata.builder("test")
.settings(settings(Version.CURRENT))
.numberOfShards(shards).numberOfReplicas(replicas)
.build();
RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
routingTableBuilder.addAsNew(indexMetadata);

final ClusterState stateWithUnassigned = ClusterState.builder(ClusterName.DEFAULT)
.nodes(DiscoveryNodes.builder().add(targetNode).build())
.metadata(Metadata.builder().put(indexMetadata, false))
.routingTable(routingTableBuilder.build()).build();
Set<String> routingValues = IntStream.range(0, 5).mapToObj(i -> randomAlphaOfLength(5)).collect(Collectors.toSet());
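// while every primary is still unassigned (with reason INDEX_CREATED from addAsNew), no shard groups should be targeted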
assertThat(OperationRouting.computeTargetedShards(stateWithUnassigned, new String[]{"test"}, null), empty());
assertThat(OperationRouting.computeTargetedShards(stateWithUnassigned, new String[]{"test"}, Map.of("test", routingValues)),
empty());
assertThat(OperationRouting.computeTargetedShards(stateWithUnassigned, new String[]{"test"}, Map.of("not_test", routingValues)),
empty());

RoutingChangesObserver routingChangesObserver = new RoutingChangesObserver.AbstractRoutingChangesObserver();
RoutingNodes routingNodes = new RoutingNodes(stateWithUnassigned, false);
int initializeCount = between(1, shards);
Set<ShardRouting> initialized = new HashSet<>();
for (RoutingNodes.UnassignedShards.UnassignedIterator iterator = routingNodes.unassigned().iterator(); iterator.hasNext(); ) {
ShardRouting next = iterator.next();
if (next.primary()) {
initialized.add(iterator.initialize(targetNode.getId(), null, 0, routingChangesObserver));
}
if (initialized.size() >= initializeCount) {
break;
}
}

final ClusterState stateWithInitialized = ClusterState.builder(stateWithUnassigned)
.routingTable(new RoutingTable.Builder().updateNodes(stateWithUnassigned.routingTable().version(), routingNodes).build())
.build();
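// primaries that are merely initializing are still not targeted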
assertThat(OperationRouting.computeTargetedShards(stateWithInitialized, new String[]{"test"}, null), empty());
assertThat(OperationRouting.computeTargetedShards(stateWithInitialized, new String[]{"test"}, Map.of("test", routingValues)),
empty());
assertThat(OperationRouting.computeTargetedShards(stateWithInitialized, new String[]{"test"}, Map.of("not_test", routingValues)),
empty());

RoutingNodes routingNodesWithStarted = new RoutingNodes(stateWithInitialized, false);
Set<ShardRouting> startedShards = new HashSet<>();
for (ShardRouting shardRouting : initialized) {
startedShards.add(routingNodesWithStarted.startShard(logger, shardRouting, routingChangesObserver));
}

final ClusterState stateWithStarted = ClusterState.builder(stateWithInitialized)
.routingTable(new RoutingTable.Builder().updateNodes(stateWithInitialized.routingTable().version(), routingNodesWithStarted)
.build())
.build();
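// once primaries are started they are targeted again; routing values for the searched index narrow the selection, routing for other indices is ignored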
assertThat(OperationRouting.computeTargetedShards(stateWithStarted, new String[]{"test"}, null), hasSize(startedShards.size()));
assertThat(OperationRouting.computeTargetedShards(stateWithStarted, new String[]{"test"}, Map.of("test", routingValues)).size(),
allOf(lessThanOrEqualTo(startedShards.size()), greaterThan(0)));
assertThat(OperationRouting.computeTargetedShards(stateWithStarted, new String[]{"test"}, Map.of("not_test", routingValues)),
hasSize(startedShards.size()));
}
}