Merge branch 'main' into get-active-only-api-keys
elasticmachine authored Aug 8, 2023
2 parents f2522d6 + 36d60e2 commit 5177e1b
Showing 23 changed files with 260 additions and 126 deletions.
5 changes: 5 additions & 0 deletions docs/changelog/98260.yaml
@@ -0,0 +1,5 @@
pr: 98260
summary: Avoid lifecycle NPE in the data stream lifecycle usage API
area: Data streams
type: bug
issues: []
13 changes: 8 additions & 5 deletions docs/reference/modules/discovery/bootstrapping.asciidoc
@@ -27,15 +27,18 @@ node:
if it is not possible to use the `node.name` of the node and there are
multiple nodes sharing a single IP address.

IMPORTANT: After the cluster has formed, remove the `cluster.initial_master_nodes`
setting from each node's configuration. It should not be set for
master-ineligible nodes, master-eligible nodes joining an existing cluster, or
nodes which are restarting.
+
[IMPORTANT]
====
After the cluster has formed, remove the `cluster.initial_master_nodes` setting
from each node's configuration. It should not be set for master-ineligible
nodes, master-eligible nodes joining an existing cluster, or nodes which are
restarting.
If you leave `cluster.initial_master_nodes` in place once the cluster has
formed then there is a risk that a future misconfiguration may result in
bootstrapping a new cluster alongside your existing cluster. It may not be
possible to recover from this situation without losing data.
====

The simplest way to create a new cluster is for you to select one of your
master-eligible nodes that will bootstrap itself into a single-node cluster,
17 changes: 12 additions & 5 deletions docs/reference/query-dsl/rule-query.asciidoc
@@ -7,10 +7,17 @@

preview::[]

Applies rules configured in the <<query-rules-apis, Query Rules API>> to the query.
Applies <<query-rules-apis,query rules>> to the query before returning results.
This feature is used to promote documents in the manner of a <<query-dsl-pinned-query>> based on matching defined rules.
If no matching query rules are defined, the "organic" matches for the query are returned.

[NOTE]
====
To use the rule query, you first need a defined set of query rules.
Use the <<query-rules-apis, query rules management APIs>> to create and manage query rules.
For more information and examples, see <<search-using-query-rules>>.
====

==== Example request

////
@@ -26,8 +33,8 @@ PUT _query_rules/my-ruleset
"criteria": [
{
"type": "exact",
"metadata": "query_string",
"values": ["pugs"]
"metadata": "user_query",
"values": ["puggles"]
}
],
"actions": {
@@ -54,12 +61,12 @@ GET /_search
"query": {
"rule_query": {
"match_criteria": {
"query_string": "pugs"
"user_query": "pugs"
},
"ruleset_id": "my-ruleset",
"organic": {
"match": {
"description": "pugs"
"description": "puggles"
}
}
}
@@ -89,7 +89,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
* Evaluates the automatic conditions and converts the whole configuration to XContent.
* For the automatic conditions it also adds the suffix [automatic]
*/
public XContentBuilder evaluateAndConvertToXContent(XContentBuilder builder, Params params, TimeValue retention) throws IOException {
public XContentBuilder evaluateAndConvertToXContent(XContentBuilder builder, Params params, @Nullable TimeValue retention)
throws IOException {
builder.startObject();
concreteConditions.toXContentFragment(builder, params);
for (String automaticCondition : automaticConditions) {
@@ -220,15 +220,39 @@ public boolean isInSortOrderExecutionRequired() {
}

/**
* Return false if this aggregation or any of the child aggregations does not support concurrent search
* Return false if this aggregation or any of the child aggregations does not support concurrent search.
* As a result, such an aggregation will always be executed sequentially even when concurrency is enabled for the query phase.
* Note: aggregations that don't support concurrency may or may not support offloading their collection to the search worker threads,
* depending on what {@link #supportsOffloadingSequentialCollection()} returns.
*/
public boolean supportsConcurrentExecution() {
if (isInSortOrderExecutionRequired()) {
return false;
}
for (AggregationBuilder builder : factoriesBuilder.getAggregatorFactories()) {
if (builder.supportsConcurrentExecution() == false) {
return false;
}
}
return isInSortOrderExecutionRequired() == false;
return supportsOffloadingSequentialCollection();
}

/**
* Returns false if this aggregation or any of its child aggregations does not support offloading its sequential collection
* to a separate thread. As a result, such an aggregation will always be executed sequentially, fully in the search thread,
* without offloading its collection to the search worker threads.
* Note: aggregations that don't support offloading sequential collection don't support concurrency by definition.
*/
public boolean supportsOffloadingSequentialCollection() {
if (isInSortOrderExecutionRequired()) {
return false;
}
for (AggregationBuilder builder : factoriesBuilder.getAggregatorFactories()) {
if (builder.supportsOffloadingSequentialCollection() == false) {
return false;
}
}
return true;
}

/**
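Taken together, the two methods encode a simple contract: offloading support is a prerequisite for concurrency support, and either capability is vetoed by any child aggregation that lacks it. A minimal, self-contained sketch of that contract follows, using hypothetical stand-in classes rather than the real AggregationBuilder machinery (the sort-order check is omitted for brevity):

import java.util.ArrayList;
import java.util.List;

// Hypothetical stand-ins for AggregationBuilder, for illustration only.
abstract class SketchAggBuilder {
    private final List<SketchAggBuilder> subAggregations = new ArrayList<>();

    SketchAggBuilder subAggregation(SketchAggBuilder sub) {
        subAggregations.add(sub);
        return this;
    }

    // Concurrent execution requires every child to support it, and additionally
    // requires that this aggregation can offload its sequential collection.
    boolean supportsConcurrentExecution() {
        for (SketchAggBuilder sub : subAggregations) {
            if (sub.supportsConcurrentExecution() == false) {
                return false;
            }
        }
        return supportsOffloadingSequentialCollection();
    }

    // Offloading likewise requires every child to support it.
    boolean supportsOffloadingSequentialCollection() {
        for (SketchAggBuilder sub : subAggregations) {
            if (sub.supportsOffloadingSequentialCollection() == false) {
                return false;
            }
        }
        return true;
    }
}

// Mirrors builders like the ones below that opt out of offloading, which by
// definition also opts them out of concurrent execution.
class SequentialOnlyAgg extends SketchAggBuilder {
    @Override
    boolean supportsOffloadingSequentialCollection() {
        return false;
    }
}

class PlainAgg extends SketchAggBuilder {
}

class ConcurrencyContractSketch {
    public static void main(String[] args) {
        SketchAggBuilder parent = new PlainAgg().subAggregation(new SequentialOnlyAgg());
        System.out.println(parent.supportsConcurrentExecution());             // prints false
        System.out.println(parent.supportsOffloadingSequentialCollection());  // prints false
    }
}

The sequential-only child vetoes both capabilities for its parent, which is exactly the invariant the cardinality, sampler, and nested builders below rely on when they override supportsOffloadingSequentialCollection.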
@@ -97,7 +97,7 @@ public boolean supportsSampling() {
}

@Override
public boolean supportsConcurrentExecution() {
public boolean supportsOffloadingSequentialCollection() {
return false;
}

@@ -81,7 +81,7 @@ public BucketCardinality bucketCardinality() {
}

@Override
public boolean supportsConcurrentExecution() {
public boolean supportsOffloadingSequentialCollection() {
return false;
}

@@ -113,7 +113,7 @@ public boolean supportsSampling() {
}

@Override
public boolean supportsConcurrentExecution() {
public boolean supportsOffloadingSequentialCollection() {
return false;
}

@@ -89,8 +89,6 @@ public class CardinalityAggregatorTests extends AggregatorTestCase {
/** Script to extract a collection of numeric values from the 'numbers' field **/
public static final String NUMERIC_VALUES_SCRIPT = "doc['numbers']";

public static final int HASHER_DEFAULT_SEED = 17;

@Override
protected ScriptService getMockScriptService() {
final Map<String, Function<Map<String, Object>, Object>> scripts = new HashMap<>();
@@ -1074,7 +1074,6 @@ public RecyclerBytesStreamOutput newNetworkBytesStream() {
);
masterService = new AckedFakeThreadPoolMasterService(
localNode.getId(),
"test",
threadPool,
runnable -> deterministicTaskQueue.scheduleNow(onNode(runnable))
);
@@ -1722,12 +1721,7 @@ static class AckedFakeThreadPoolMasterService extends FakeThreadPoolMasterService
AckCollector nextAckCollector = new AckCollector();
boolean publicationMayFail = false;

AckedFakeThreadPoolMasterService(
String nodeName,
String serviceName,
ThreadPool threadPool,
Consumer<Runnable> onTaskAvailableToRun
) {
AckedFakeThreadPoolMasterService(String nodeName, ThreadPool threadPool, Consumer<Runnable> onTaskAvailableToRun) {
super(nodeName, threadPool, onTaskAvailableToRun);
}

@@ -79,8 +79,8 @@ protected void publish(
AckListener ackListener,
ActionListener<Void> publicationListener
) {
// fork the publication to add a little extra room for concurrent activity here
threadPool.generic().execute(threadPool.getThreadContext().preserveContext(new Runnable() {
// allow forking the publication to add a little extra room for concurrent activity here
taskExecutor.accept(threadPool.getThreadContext().preserveContext(new Runnable() {
@Override
public void run() {
FakeThreadPoolMasterService.super.publish(clusterStatePublicationEvent, wrapAckListener(ackListener), publicationListener);
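Routing the forked publication through an injected Consumer<Runnable> instead of calling threadPool.generic().execute(...) directly lets a deterministic test harness capture the task and run it at a moment of its choosing. A rough sketch of that pattern, with hypothetical class names rather than the actual test-framework types:

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.function.Consumer;

// Hypothetical deterministic executor: instead of handing tasks straight to a
// thread pool, it queues them so the test decides exactly when each one runs.
class DeterministicTaskQueueSketch {
    private final Deque<Runnable> tasks = new ArrayDeque<>();

    Consumer<Runnable> asExecutor() {
        return tasks::addLast;
    }

    void runAllTasks() {
        while (tasks.isEmpty() == false) {
            tasks.pollFirst().run();
        }
    }
}

class DeterministicPublishSketch {
    public static void main(String[] args) {
        DeterministicTaskQueueSketch queue = new DeterministicTaskQueueSketch();
        Consumer<Runnable> taskExecutor = queue.asExecutor();

        // The "publication" is forked through the injected executor rather than
        // executed immediately, leaving room for concurrent activity in between.
        taskExecutor.accept(() -> System.out.println("publishing cluster state"));
        System.out.println("other concurrent activity");
        queue.runAllTasks(); // now the publication actually runs
    }
}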
@@ -61,10 +61,25 @@ public void testFromXContent() throws IOException {
public void testSupportsConcurrentExecution() {
AB builder = createTestAggregatorBuilder();
boolean supportsConcurrency = builder.supportsConcurrentExecution();
if (supportsConcurrency) {
assertTrue(builder.supportsOffloadingSequentialCollection());
}
AggregationBuilder bucketBuilder = new HistogramAggregationBuilder("test");
assertThat(bucketBuilder.supportsConcurrentExecution(), equalTo(true));
assertTrue(bucketBuilder.supportsConcurrentExecution());
bucketBuilder.subAggregation(builder);
assertThat(bucketBuilder.supportsConcurrentExecution(), equalTo(supportsConcurrency));
if (bucketBuilder.supportsConcurrentExecution()) {
assertTrue(bucketBuilder.supportsOffloadingSequentialCollection());
}
}

public void testSupportsOffloadingSequentialCollection() {
AB builder = createTestAggregatorBuilder();
boolean supportsOffloadingSequentialCollection = builder.supportsOffloadingSequentialCollection();
AggregationBuilder bucketBuilder = new HistogramAggregationBuilder("test");
assertTrue(bucketBuilder.supportsOffloadingSequentialCollection());
bucketBuilder.subAggregation(builder);
assertThat(bucketBuilder.supportsOffloadingSequentialCollection(), equalTo(supportsOffloadingSequentialCollection));
}

/**
@@ -77,7 +77,9 @@ permissions in this entry apply.

NOTE: No explicit <<security-privileges,privileges>> should be specified for either search
or replication access. The creation process automatically converts the `access` specification
to a role descriptor which has relevant privileges assigned accordingly.
to a <<api-key-role-descriptors,role descriptor>> which has relevant privileges assigned accordingly.
Both the `access` value and its corresponding `role_descriptors` are returned in responses of the
<<security-api-get-api-key,Get API keys API>> and the <<security-api-query-api-key,Query API keys API>>.

NOTE: Unlike <<api-key-role-descriptors,REST API keys>>, a cross-cluster API key
does not capture permissions of the authenticated user. The API key's effective
@@ -217,6 +219,24 @@ A successful call returns a JSON structure that contains the information of the
"enabled": true
}
}
},
"access": { <8>
"search": [
{
"names": [
"logs*"
],
"allow_restricted_indices": false
}
],
"replication": [
{
"names": [
"archive*"
],
"allow_restricted_indices": false
}
]
}
}
]
@@ -235,6 +255,7 @@ It is `cross_cluster_replication` if only cross-cluster replication is required.
Or both, if search and replication are required.
<6> The indices privileges corresponding to the required cross-cluster search access.
<7> The indices privileges corresponding to the required cross-cluster replication access.
<8> The `access` corresponds to the value specified at API key creation time.


To use the generated API key, configure it as the cluster credential as part of an API key based remote cluster configuration.
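To make the conversion described in the NOTE above concrete, here is a deliberately simplified sketch that maps an `access` specification onto the indices entries of a role descriptor. The shapes and the placeholder privilege names are assumptions for illustration only, not the server's actual implementation:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

// Simplified, hypothetical model of the access -> role descriptor conversion.
// "<search-privileges>" and "<replication-privileges>" are placeholders for the
// privilege sets the server actually assigns.
class AccessToRoleDescriptorSketch {

    record IndicesAccess(List<String> names, boolean allowRestrictedIndices) {}

    record IndicesPrivileges(List<String> names, List<String> privileges, boolean allowRestrictedIndices) {}

    static Map<String, Object> toRoleDescriptor(List<IndicesAccess> search, List<IndicesAccess> replication) {
        List<IndicesPrivileges> indices = new ArrayList<>();
        for (IndicesAccess entry : search) {
            indices.add(new IndicesPrivileges(entry.names(), List.of("<search-privileges>"), entry.allowRestrictedIndices()));
        }
        for (IndicesAccess entry : replication) {
            indices.add(new IndicesPrivileges(entry.names(), List.of("<replication-privileges>"), entry.allowRestrictedIndices()));
        }
        // One descriptor covering both kinds of access, as in the example response above.
        return Map.of("cross_cluster", Map.of("indices", indices));
    }

    public static void main(String[] args) {
        Map<String, Object> descriptor = toRoleDescriptor(
            List.of(new IndicesAccess(List.of("logs*"), false)),
            List.of(new IndicesAccess(List.of("archive*"), false))
        );
        System.out.println(descriptor);
    }
}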
@@ -161,6 +161,16 @@ A successful call returns a JSON structure that contains the information of the
"enabled": true
}
}
},
"access": { <2>
"search": [
{
"names": [
"logs*"
],
"allow_restricted_indices": false
}
]
}
}
]
@@ -169,6 +179,7 @@ A successful call returns a JSON structure that contains the information of the
// NOTCONSOLE
<1> Role descriptor corresponding to the specified `access` scope at creation time.
In this example, it grants cross-cluster search permission for the `logs*` index pattern.
<2> The `access` corresponds to the value specified at API key creation time.


The following example updates the API key created above, assigning it new access scope and metadata:
@@ -243,6 +254,16 @@ and it will be:
"enabled": true
}
}
},
"access": { <2>
"replication": [
{
"names": [
"archive*"
],
"allow_restricted_indices": false
}
]
}
}
]
@@ -252,3 +273,4 @@ and it will be:
<1> Role descriptor is updated to be the `access` scope specified at update time.
In this example, it is updated to grant the cross-cluster replication permission
for the `archive*` index pattern.
<2> The `access` corresponds to the value specified at API key update time.
@@ -90,19 +90,24 @@ public void testAction() throws Exception {
Map<String, DataStream> dataStreamMap = new HashMap<>();
for (int dataStreamCount = 0; dataStreamCount < randomInt(200); dataStreamCount++) {
boolean hasLifecycle = randomBoolean();
long retentionMillis;
DataStreamLifecycle lifecycle;
if (hasLifecycle) {
retentionMillis = randomLongBetween(1000, 100000);
count.incrementAndGet();
totalRetentionTimes.addAndGet(retentionMillis);
if (retentionMillis < minRetention.get()) {
minRetention.set(retentionMillis);
}
if (retentionMillis > maxRetention.get()) {
maxRetention.set(retentionMillis);
if (randomBoolean()) {
lifecycle = new DataStreamLifecycle(null, null);
} else {
long retentionMillis = randomLongBetween(1000, 100000);
count.incrementAndGet();
totalRetentionTimes.addAndGet(retentionMillis);
if (retentionMillis < minRetention.get()) {
minRetention.set(retentionMillis);
}
if (retentionMillis > maxRetention.get()) {
maxRetention.set(retentionMillis);
}
lifecycle = DataStreamLifecycle.newBuilder().dataRetention(retentionMillis).build();
}
} else {
retentionMillis = 0;
lifecycle = null;
}
List<Index> indices = new ArrayList<>();
for (int indicesCount = 0; indicesCount < randomIntBetween(1, 10); indicesCount++) {
@@ -120,7 +125,7 @@ public void testAction() throws Exception {
systemDataStream,
randomBoolean(),
IndexMode.STANDARD,
hasLifecycle ? DataStreamLifecycle.newBuilder().dataRetention(retentionMillis).build() : null
lifecycle
);
dataStreamMap.put(dataStream.getName(), dataStream);
}
@@ -61,6 +61,7 @@ protected void masterOperation(
final Collection<DataStream> dataStreams = state.metadata().dataStreams().values();
LongSummaryStatistics retentionStats = dataStreams.stream()
.filter(ds -> ds.getLifecycle() != null)
.filter(ds -> ds.getLifecycle().getEffectiveDataRetention() != null)
.collect(Collectors.summarizingLong(ds -> ds.getLifecycle().getEffectiveDataRetention().getMillis()));
long dataStreamsWithLifecycles = retentionStats.getCount();
long minRetention = dataStreamsWithLifecycles == 0 ? 0 : retentionStats.getMin();
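The added filter guards against lifecycles whose effective data retention is unset, so `getMillis()` is never invoked on a null value. A self-contained sketch of the same null-safe summarizing pattern, using hypothetical stand-in records rather than the real DataStream types:

import java.util.List;
import java.util.LongSummaryStatistics;

// Hypothetical stand-in types showing the null-safe summarizing pattern used above.
class RetentionStatsSketch {

    record Lifecycle(Long retentionMillis) {}               // retention may be unset (null)
    record DataStream(String name, Lifecycle lifecycle) {}  // lifecycle itself may be null

    public static void main(String[] args) {
        List<DataStream> dataStreams = List.of(
            new DataStream("with-retention", new Lifecycle(86_400_000L)),
            new DataStream("lifecycle-no-retention", new Lifecycle(null)), // would NPE without the second filter
            new DataStream("no-lifecycle", null)
        );

        LongSummaryStatistics stats = dataStreams.stream()
            .filter(ds -> ds.lifecycle() != null)                    // skip data streams without a lifecycle
            .filter(ds -> ds.lifecycle().retentionMillis() != null)  // skip lifecycles without effective retention
            .mapToLong(ds -> ds.lifecycle().retentionMillis())
            .summaryStatistics();

        long count = stats.getCount();
        System.out.println("count=" + count
            + " min=" + (count == 0 ? 0 : stats.getMin())
            + " max=" + (count == 0 ? 0 : stats.getMax()));
    }
}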

This file was deleted.

