Skip to content

Commit

Permalink
Replacing "blacklist" with "blocklist" and "whitelist" with "allowlist"
Browse files Browse the repository at this point in the history
  • Loading branch information
aponb committed May 19, 2021
1 parent c116062 commit e3ddf3c
Show file tree
Hide file tree
Showing 110 changed files with 640 additions and 648 deletions.
2 changes: 1 addition & 1 deletion TESTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -242,7 +242,7 @@ The YAML REST tests support all the options provided by the randomized runner, p

- `tests.rest.suite`: comma separated paths of the test suites to be run (by default loaded from /rest-api-spec/test). It is possible to run only a subset of the tests providing a sub-folder or even a single yaml file (the default /rest-api-spec/test prefix is optional when files are loaded from classpath) e.g. `-Dtests.rest.suite=index,get,create/10_with_id`

- `tests.rest.blacklist`: comma separated globs that identify tests that are blacklisted and need to be skipped e.g. `-Dtests.rest.blacklist=index/**/Index document,get/10_basic/**`
- `tests.rest.blocklist`: comma separated globs that identify tests that are blocklisted and need to be skipped e.g. `-Dtests.rest.blocklist=index/**/Index document,get/10_basic/**`

Java REST tests can be run with the "javaRestTest" task.

Expand Down
2 changes: 1 addition & 1 deletion client/rest-high-level/build.gradle
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,7 @@ check.dependsOn(asyncIntegTest)
testClusters.all {
testDistribution = 'ARCHIVE'
systemProperty 'opensearch.scripting.update.ctx_in_params', 'false'
setting 'reindex.remote.whitelist', '[ "[::1]:*", "127.0.0.1:*" ]'
setting 'reindex.remote.allowlist', '[ "[::1]:*", "127.0.0.1:*" ]'

extraConfigFile 'roles.yml', file('roles.yml')
user username: System.getProperty('tests.rest.cluster.username', 'test_user'),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ public interface NodeSelector {
* iterate the nodes as many times as they need.
* <p>
* This may be called twice per request: first for "living" nodes that
* have not been blacklisted by previous errors. If the selector removes
* have not been blocklisted by previous errors. If the selector removes
* all nodes from the list or if there aren't any living nodes then the
* {@link RestClient} will call this method with a list of "dead" nodes.
* <p>
Expand Down
30 changes: 15 additions & 15 deletions client/rest/src/main/java/org/opensearch/client/RestClient.java
Original file line number Diff line number Diff line change
Expand Up @@ -125,7 +125,7 @@ public class RestClient implements Closeable {
final List<Header> defaultHeaders;
private final String pathPrefix;
private final AtomicInteger lastNodeIndex = new AtomicInteger(0);
private final ConcurrentMap<HttpHost, DeadHostState> blacklist = new ConcurrentHashMap<>();
private final ConcurrentMap<HttpHost, DeadHostState> blocklist = new ConcurrentHashMap<>();
private final FailureListener failureListener;
private final NodeSelector nodeSelector;
private volatile NodeTuple<List<Node>> nodeTuple;
Expand Down Expand Up @@ -240,7 +240,7 @@ public synchronized void setNodes(Collection<Node> nodes) {
}
this.nodeTuple = new NodeTuple<>(
Collections.unmodifiableList(new ArrayList<>(nodesByHost.values())), authCache);
this.blacklist.clear();
this.blocklist.clear();
}

/**
Expand Down Expand Up @@ -441,23 +441,23 @@ public void cancelled() {
*/
private NodeTuple<Iterator<Node>> nextNodes() throws IOException {
NodeTuple<List<Node>> nodeTuple = this.nodeTuple;
Iterable<Node> hosts = selectNodes(nodeTuple, blacklist, lastNodeIndex, nodeSelector);
Iterable<Node> hosts = selectNodes(nodeTuple, blocklist, lastNodeIndex, nodeSelector);
return new NodeTuple<>(hosts.iterator(), nodeTuple.authCache);
}

/**
* Select nodes to try and sorts them so that the first one will be tried initially, then the following ones
* if the previous attempt failed and so on. Package private for testing.
*/
static Iterable<Node> selectNodes(NodeTuple<List<Node>> nodeTuple, Map<HttpHost, DeadHostState> blacklist,
static Iterable<Node> selectNodes(NodeTuple<List<Node>> nodeTuple, Map<HttpHost, DeadHostState> blocklist,
AtomicInteger lastNodeIndex, NodeSelector nodeSelector) throws IOException {
/*
* Sort the nodes into living and dead lists.
*/
List<Node> livingNodes = new ArrayList<>(Math.max(0, nodeTuple.nodes.size() - blacklist.size()));
List<DeadNode> deadNodes = new ArrayList<>(blacklist.size());
List<Node> livingNodes = new ArrayList<>(Math.max(0, nodeTuple.nodes.size() - blocklist.size()));
List<DeadNode> deadNodes = new ArrayList<>(blocklist.size());
for (Node node : nodeTuple.nodes) {
DeadHostState deadness = blacklist.get(node.getHost());
DeadHostState deadness = blocklist.get(node.getHost());
if (deadness == null || deadness.shallBeRetried()) {
livingNodes.add(node);
} else {
Expand Down Expand Up @@ -514,9 +514,9 @@ static Iterable<Node> selectNodes(NodeTuple<List<Node>> nodeTuple, Map<HttpHost,
* Receives as an argument the host that was used for the successful request.
*/
private void onResponse(Node node) {
DeadHostState removedHost = this.blacklist.remove(node.getHost());
DeadHostState removedHost = this.blocklist.remove(node.getHost());
if (logger.isDebugEnabled() && removedHost != null) {
logger.debug("removed [" + node + "] from blacklist");
logger.debug("removed [" + node + "] from blocklist");
}
}

Expand All @@ -527,17 +527,17 @@ private void onResponse(Node node) {
private void onFailure(Node node) {
while(true) {
DeadHostState previousDeadHostState =
blacklist.putIfAbsent(node.getHost(), new DeadHostState(DeadHostState.DEFAULT_TIME_SUPPLIER));
blocklist.putIfAbsent(node.getHost(), new DeadHostState(DeadHostState.DEFAULT_TIME_SUPPLIER));
if (previousDeadHostState == null) {
if (logger.isDebugEnabled()) {
logger.debug("added [" + node + "] to blacklist");
logger.debug("added [" + node + "] to blocklist");
}
break;
}
if (blacklist.replace(node.getHost(), previousDeadHostState,
if (blocklist.replace(node.getHost(), previousDeadHostState,
new DeadHostState(previousDeadHostState))) {
if (logger.isDebugEnabled()) {
logger.debug("updated [" + node + "] already in blacklist");
logger.debug("updated [" + node + "] already in blocklist");
}
break;
}
Expand Down Expand Up @@ -705,8 +705,8 @@ static class NodeTuple<T> {
}

/**
* Contains a reference to a blacklisted node and the time until it is
* revived. We use this so we can do a single pass over the blacklist.
* Contains a reference to a blocklisted node and the time until it is
* revived. We use this so we can do a single pass over the blocklist.
*/
private static class DeadNode implements Comparable<DeadNode> {
final Node node;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@
import static org.junit.Assert.fail;

/**
* Tests for {@link RestClient} behaviour against multiple hosts: fail-over, blacklisting etc.
* Tests for {@link RestClient} behaviour against multiple hosts: fail-over, blocklisting etc.
* Relies on a mock http client to intercept requests and return desired responses based on request path.
*/
public class RestClientMultipleHostsTests extends RestClientTestCase {
Expand Down Expand Up @@ -150,7 +150,7 @@ public void testRoundRobinRetryErrors() throws Exception {
fail("request should have failed");
} catch (ResponseException e) {
Set<HttpHost> hostsSet = hostsSet();
//first request causes all the hosts to be blacklisted, the returned exception holds one suppressed exception each
//first request causes all the hosts to be blocklisted, the returned exception holds one suppressed exception each
failureListener.assertCalled(nodes);
do {
Response response = e.getResponse();
Expand All @@ -169,7 +169,7 @@ public void testRoundRobinRetryErrors() throws Exception {
assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size());
} catch (IOException e) {
Set<HttpHost> hostsSet = hostsSet();
//first request causes all the hosts to be blacklisted, the returned exception holds one suppressed exception each
//first request causes all the hosts to be blocklisted, the returned exception holds one suppressed exception each
failureListener.assertCalled(nodes);
do {
HttpHost httpHost = HttpHost.create(e.getMessage());
Expand Down Expand Up @@ -201,13 +201,13 @@ public void testRoundRobinRetryErrors() throws Exception {
assertThat(response.getStatusLine().getStatusCode(), equalTo(Integer.parseInt(retryEndpoint.substring(1))));
assertTrue("host [" + response.getHost() + "] not found, most likely used multiple times",
hostsSet.remove(response.getHost()));
//after the first request, all hosts are blacklisted, a single one gets resurrected each time
//after the first request, all hosts are blocklisted, a single one gets resurrected each time
failureListener.assertCalled(response.getHost());
assertEquals(0, e.getSuppressed().length);
} catch (IOException e) {
HttpHost httpHost = HttpHost.create(e.getMessage());
assertTrue("host [" + httpHost + "] not found, most likely used multiple times", hostsSet.remove(httpHost));
//after the first request, all hosts are blacklisted, a single one gets resurrected each time
//after the first request, all hosts are blocklisted, a single one gets resurrected each time
failureListener.assertCalled(httpHost);
assertEquals(0, e.getSuppressed().length);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -263,11 +263,11 @@ public String toString() {

NodeTuple<List<Node>> nodeTuple = new NodeTuple<>(Arrays.asList(n1, n2, n3), null);

Map<HttpHost, DeadHostState> emptyBlacklist = Collections.emptyMap();
Map<HttpHost, DeadHostState> emptyBlocklist = Collections.emptyMap();

// Normal cases where the node selector doesn't reject all living nodes
assertSelectLivingHosts(Arrays.asList(n1, n2, n3), nodeTuple, emptyBlacklist, NodeSelector.ANY);
assertSelectLivingHosts(Arrays.asList(n2, n3), nodeTuple, emptyBlacklist, not1);
assertSelectLivingHosts(Arrays.asList(n1, n2, n3), nodeTuple, emptyBlocklist, NodeSelector.ANY);
assertSelectLivingHosts(Arrays.asList(n2, n3), nodeTuple, emptyBlocklist, not1);

/*
* Try a NodeSelector that excludes all nodes. This should
Expand All @@ -277,80 +277,80 @@ public String toString() {
String message = "NodeSelector [NONE] rejected all nodes, living ["
+ "[host=http://1, version=1], [host=http://2, version=2], "
+ "[host=http://3, version=3]] and dead []";
assertEquals(message, assertSelectAllRejected(nodeTuple, emptyBlacklist, noNodes));
assertEquals(message, assertSelectAllRejected(nodeTuple, emptyBlocklist, noNodes));
}

// Mark all the nodes dead for a few test cases
{
final AtomicLong time = new AtomicLong(0L);
Supplier<Long> timeSupplier = time::get;
Map<HttpHost, DeadHostState> blacklist = new HashMap<>();
blacklist.put(n1.getHost(), new DeadHostState(timeSupplier));
blacklist.put(n2.getHost(), new DeadHostState(new DeadHostState(timeSupplier)));
blacklist.put(n3.getHost(), new DeadHostState(new DeadHostState(new DeadHostState(timeSupplier))));
Map<HttpHost, DeadHostState> blocklist = new HashMap<>();
blocklist.put(n1.getHost(), new DeadHostState(timeSupplier));
blocklist.put(n2.getHost(), new DeadHostState(new DeadHostState(timeSupplier)));
blocklist.put(n3.getHost(), new DeadHostState(new DeadHostState(new DeadHostState(timeSupplier))));

/*
* case when fewer nodeTuple than blacklist, won't result in any IllegalCapacityException
* case when fewer nodeTuple than blocklist, won't result in any IllegalCapacityException
*/
{
NodeTuple<List<Node>> fewerNodeTuple = new NodeTuple<>(Arrays.asList(n1, n2), null);
assertSelectLivingHosts(Arrays.asList(n1), fewerNodeTuple, blacklist, NodeSelector.ANY);
assertSelectLivingHosts(Arrays.asList(n2), fewerNodeTuple, blacklist, not1);
assertSelectLivingHosts(Arrays.asList(n1), fewerNodeTuple, blocklist, NodeSelector.ANY);
assertSelectLivingHosts(Arrays.asList(n2), fewerNodeTuple, blocklist, not1);
}

/*
* selectHosts will revive a single host regardless of
* blacklist time. It'll revive the node that is closest
* blocklist time. It'll revive the node that is closest
* to being revived that the NodeSelector is ok with.
*/
assertEquals(singletonList(n1), RestClient.selectNodes(nodeTuple, blacklist, new AtomicInteger(), NodeSelector.ANY));
assertEquals(singletonList(n2), RestClient.selectNodes(nodeTuple, blacklist, new AtomicInteger(), not1));
assertEquals(singletonList(n1), RestClient.selectNodes(nodeTuple, blocklist, new AtomicInteger(), NodeSelector.ANY));
assertEquals(singletonList(n2), RestClient.selectNodes(nodeTuple, blocklist, new AtomicInteger(), not1));

/*
* Try a NodeSelector that excludes all nodes. This should
* return a failure, but a different failure than when the
* blacklist is empty so that the caller knows that all of
* their nodes are blacklisted AND blocked.
* blocklist is empty so that the caller knows that all of
* their nodes are blocklisted AND blocked.
*/
String message = "NodeSelector [NONE] rejected all nodes, living [] and dead ["
+ "[host=http://1, version=1], [host=http://2, version=2], "
+ "[host=http://3, version=3]]";
assertEquals(message, assertSelectAllRejected(nodeTuple, blacklist, noNodes));
assertEquals(message, assertSelectAllRejected(nodeTuple, blocklist, noNodes));

/*
* Now lets wind the clock forward, past the timeout for one of
* the dead nodes. We should return it.
*/
time.set(new DeadHostState(timeSupplier).getDeadUntilNanos());
assertSelectLivingHosts(Arrays.asList(n1), nodeTuple, blacklist, NodeSelector.ANY);
assertSelectLivingHosts(Arrays.asList(n1), nodeTuple, blocklist, NodeSelector.ANY);

/*
* But if the NodeSelector rejects that node then we'll pick the
* first on that the NodeSelector doesn't reject.
*/
assertSelectLivingHosts(Arrays.asList(n2), nodeTuple, blacklist, not1);
assertSelectLivingHosts(Arrays.asList(n2), nodeTuple, blocklist, not1);

/*
* If we wind the clock way into the future, past any of the
* blacklist timeouts then we function as though the nodes aren't
* in the blacklist at all.
* blocklist timeouts then we function as though the nodes aren't
* in the blocklist at all.
*/
time.addAndGet(DeadHostState.MAX_CONNECTION_TIMEOUT_NANOS);
assertSelectLivingHosts(Arrays.asList(n1, n2, n3), nodeTuple, blacklist, NodeSelector.ANY);
assertSelectLivingHosts(Arrays.asList(n2, n3), nodeTuple, blacklist, not1);
assertSelectLivingHosts(Arrays.asList(n1, n2, n3), nodeTuple, blocklist, NodeSelector.ANY);
assertSelectLivingHosts(Arrays.asList(n2, n3), nodeTuple, blocklist, not1);
}
}

private void assertSelectLivingHosts(List<Node> expectedNodes, NodeTuple<List<Node>> nodeTuple,
Map<HttpHost, DeadHostState> blacklist, NodeSelector nodeSelector) throws IOException {
Map<HttpHost, DeadHostState> blocklist, NodeSelector nodeSelector) throws IOException {
int iterations = 1000;
AtomicInteger lastNodeIndex = new AtomicInteger(0);
assertEquals(expectedNodes, RestClient.selectNodes(nodeTuple, blacklist, lastNodeIndex, nodeSelector));
assertEquals(expectedNodes, RestClient.selectNodes(nodeTuple, blocklist, lastNodeIndex, nodeSelector));
// Calling it again rotates the set of results
for (int i = 1; i < iterations; i++) {
Collections.rotate(expectedNodes, 1);
assertEquals("iteration " + i, expectedNodes,
RestClient.selectNodes(nodeTuple, blacklist, lastNodeIndex, nodeSelector));
RestClient.selectNodes(nodeTuple, blocklist, lastNodeIndex, nodeSelector));
}
}

Expand All @@ -359,9 +359,9 @@ private void assertSelectLivingHosts(List<Node> expectedNodes, NodeTuple<List<No
* @return the message in the exception thrown by the failure
*/
private static String assertSelectAllRejected( NodeTuple<List<Node>> nodeTuple,
Map<HttpHost, DeadHostState> blacklist, NodeSelector nodeSelector) {
Map<HttpHost, DeadHostState> blocklist, NodeSelector nodeSelector) {
try {
RestClient.selectNodes(nodeTuple, blacklist, new AtomicInteger(0), nodeSelector);
RestClient.selectNodes(nodeTuple, blocklist, new AtomicInteger(0), nodeSelector);
throw new AssertionError("expected selectHosts to fail");
} catch (IOException e) {
return e.getMessage();
Expand Down
2 changes: 1 addition & 1 deletion gradle/formatting.gradle
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ def projectPathsToExclude = [
':example-plugins:custom-settings',
':example-plugins:custom-significance-heuristic',
':example-plugins:custom-suggester',
':example-plugins:painless-whitelist',
':example-plugins:painless-allowlist',
':example-plugins:rescore',
':example-plugins:rest-handler',
':example-plugins:script-expert-scoring',
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@
* a thread must have {@code modifyThread} to even terminate its own pool, leaving
* system threads unprotected.
* </ul>
* This class throws exception on {@code exitVM} calls, and provides a whitelist where calls
* This class throws exception on {@code exitVM} calls, and provides an allowlist where calls
* from exit are allowed.
* <p>
* Additionally it enforces threadgroup security with the following rules:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,8 +33,8 @@
package org.opensearch.analysis.common;

import org.opensearch.painless.spi.PainlessExtension;
import org.opensearch.painless.spi.Whitelist;
import org.opensearch.painless.spi.WhitelistLoader;
import org.opensearch.painless.spi.Allowlist;
import org.opensearch.painless.spi.AllowlistLoader;
import org.opensearch.script.ScriptContext;

import java.util.Collections;
Expand All @@ -43,11 +43,11 @@

public class AnalysisPainlessExtension implements PainlessExtension {

private static final Whitelist WHITELIST =
WhitelistLoader.loadFromResourceFiles(AnalysisPainlessExtension.class, "painless_whitelist.txt");
private static final Allowlist ALLOWLIST =
AllowlistLoader.loadFromResourceFiles(AnalysisPainlessExtension.class, "painless_allowlist.txt");

@Override
public Map<ScriptContext<?>, List<Whitelist>> getContextWhitelists() {
return Collections.singletonMap(AnalysisPredicateScript.CONTEXT, Collections.singletonList(WHITELIST));
public Map<ScriptContext<?>, List<Allowlist>> getContextAllowlists() {
return Collections.singletonMap(AnalysisPredicateScript.CONTEXT, Collections.singletonList(ALLOWLIST));
}
}
Loading

0 comments on commit e3ddf3c

Please sign in to comment.