diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 110982e31e661..05e07049695a0 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -831,6 +831,9 @@ class BuildPlugin implements Plugin { // TODO: remove this once ctx isn't added to update script params in 7.0 systemProperty 'es.scripting.update.ctx_in_params', 'false' + //TODO: remove this once the cname is prepended to the address by default in 7.0 + systemProperty 'es.http.cname_in_publish_address', 'true' + // Set the system keystore/truststore password if we're running tests in a FIPS-140 JVM if (project.inFipsJvm) { systemProperty 'javax.net.ssl.trustStorePassword', 'password' diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 386457146685f..914bae4d2c871 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 7.0.0-alpha1 -lucene = 8.0.0-snapshot-4d78db26be +lucene = 8.0.0-snapshot-66c671ea80 # optional dependencies spatial4j = 0.7 diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java index ecbe7f2d3a5d3..09c587cf81f23 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java @@ -28,10 +28,12 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.client.RequestConverters.EndpointBuilder; import org.elasticsearch.client.ml.CloseJobRequest; +import org.elasticsearch.client.ml.DeleteForecastRequest; import org.elasticsearch.client.ml.DeleteJobRequest; import org.elasticsearch.client.ml.FlushJobRequest; import org.elasticsearch.client.ml.ForecastJobRequest; import org.elasticsearch.client.ml.GetBucketsRequest; +import org.elasticsearch.client.ml.GetCategoriesRequest; import org.elasticsearch.client.ml.GetInfluencersRequest; import org.elasticsearch.client.ml.GetJobRequest; import org.elasticsearch.client.ml.GetJobStatsRequest; @@ -39,6 +41,7 @@ import org.elasticsearch.client.ml.GetRecordsRequest; import org.elasticsearch.client.ml.OpenJobRequest; import org.elasticsearch.client.ml.PostDataRequest; +import org.elasticsearch.client.ml.PutDatafeedRequest; import org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.client.ml.UpdateJobRequest; import org.elasticsearch.common.Strings; @@ -180,6 +183,38 @@ static Request updateJob(UpdateJobRequest updateJobRequest) throws IOException { return request; } + static Request putDatafeed(PutDatafeedRequest putDatafeedRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("datafeeds") + .addPathPart(putDatafeedRequest.getDatafeed().getId()) + .build(); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); + request.setEntity(createEntity(putDatafeedRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request deleteForecast(DeleteForecastRequest deleteForecastRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(deleteForecastRequest.getJobId()) + .addPathPartAsIs("_forecast") + 
.addPathPart(Strings.collectionToCommaDelimitedString(deleteForecastRequest.getForecastIds())) + .build(); + Request request = new Request(HttpDelete.METHOD_NAME, endpoint); + RequestConverters.Params params = new RequestConverters.Params(request); + if (deleteForecastRequest.isAllowNoForecasts() != null) { + params.putParam("allow_no_forecasts", Boolean.toString(deleteForecastRequest.isAllowNoForecasts())); + } + if (deleteForecastRequest.timeout() != null) { + params.putParam("timeout", deleteForecastRequest.timeout().getStringRep()); + } + return request; + } + static Request getBuckets(GetBucketsRequest getBucketsRequest) throws IOException { String endpoint = new EndpointBuilder() .addPathPartAsIs("_xpack") @@ -194,6 +229,20 @@ static Request getBuckets(GetBucketsRequest getBucketsRequest) throws IOExceptio return request; } + static Request getCategories(GetCategoriesRequest getCategoriesRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(getCategoriesRequest.getJobId()) + .addPathPartAsIs("results") + .addPathPartAsIs("categories") + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + request.setEntity(createEntity(getCategoriesRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request getOverallBuckets(GetOverallBucketsRequest getOverallBucketsRequest) throws IOException { String endpoint = new EndpointBuilder() .addPathPartAsIs("_xpack") diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java index 85c5771f3450b..79f9267c94d18 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java @@ -19,19 +19,20 @@ package org.elasticsearch.client; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.client.ml.ForecastJobRequest; -import org.elasticsearch.client.ml.ForecastJobResponse; -import org.elasticsearch.client.ml.PostDataRequest; -import org.elasticsearch.client.ml.PostDataResponse; -import org.elasticsearch.client.ml.UpdateJobRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.ml.CloseJobRequest; import org.elasticsearch.client.ml.CloseJobResponse; +import org.elasticsearch.client.ml.DeleteForecastRequest; import org.elasticsearch.client.ml.DeleteJobRequest; import org.elasticsearch.client.ml.DeleteJobResponse; import org.elasticsearch.client.ml.FlushJobRequest; import org.elasticsearch.client.ml.FlushJobResponse; +import org.elasticsearch.client.ml.ForecastJobRequest; +import org.elasticsearch.client.ml.ForecastJobResponse; import org.elasticsearch.client.ml.GetBucketsRequest; import org.elasticsearch.client.ml.GetBucketsResponse; +import org.elasticsearch.client.ml.GetCategoriesRequest; +import org.elasticsearch.client.ml.GetCategoriesResponse; import org.elasticsearch.client.ml.GetInfluencersRequest; import org.elasticsearch.client.ml.GetInfluencersResponse; import org.elasticsearch.client.ml.GetJobRequest; @@ -44,13 +45,19 @@ import org.elasticsearch.client.ml.GetRecordsResponse; import org.elasticsearch.client.ml.OpenJobRequest; import org.elasticsearch.client.ml.OpenJobResponse; +import org.elasticsearch.client.ml.PostDataRequest; +import 
org.elasticsearch.client.ml.PostDataResponse; +import org.elasticsearch.client.ml.PutDatafeedRequest; +import org.elasticsearch.client.ml.PutDatafeedResponse; import org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.client.ml.PutJobResponse; +import org.elasticsearch.client.ml.UpdateJobRequest; import org.elasticsearch.client.ml.job.stats.JobStats; import java.io.IOException; import java.util.Collections; + /** * Machine Learning API client wrapper for the {@link RestHighLevelClient} * @@ -387,6 +394,11 @@ public ForecastJobResponse forecastJob(ForecastJobRequest request, RequestOption /** * Updates a Machine Learning {@link org.elasticsearch.client.ml.job.config.Job} * + *
+ * <p>
+ * For additional info
+ * see the ML Update Job documentation
+ * </p>
+ * * @param request the {@link UpdateJobRequest} object enclosing the desired updates * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return a PutJobResponse object containing the updated job object @@ -425,6 +437,10 @@ public void forecastJobAsync(ForecastJobRequest request, RequestOptions options, /** * Updates a Machine Learning {@link org.elasticsearch.client.ml.job.config.Job} asynchronously * + *
+ * <p>
+ * For additional info
+ * see the ML Update Job documentation
+ * </p>
* @param request the {@link UpdateJobRequest} object enclosing the desired updates * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion @@ -438,6 +454,86 @@ public void updateJobAsync(UpdateJobRequest request, RequestOptions options, Act Collections.emptySet()); } + /** + * Creates a new Machine Learning Datafeed + *
+ * <p>
+ * For additional info + * see ML PUT datafeed documentation + * + * @param request The PutDatafeedRequest containing the {@link org.elasticsearch.client.ml.datafeed.DatafeedConfig} settings + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return PutDatafeedResponse with enclosed {@link org.elasticsearch.client.ml.datafeed.DatafeedConfig} object + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public PutDatafeedResponse putDatafeed(PutDatafeedRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::putDatafeed, + options, + PutDatafeedResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Creates a new Machine Learning Datafeed asynchronously and notifies listener on completion + *
+ * <p>
+ * For additional info + * see ML PUT datafeed documentation + * + * @param request The request containing the {@link org.elasticsearch.client.ml.datafeed.DatafeedConfig} settings + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void putDatafeedAsync(PutDatafeedRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::putDatafeed, + options, + PutDatafeedResponse::fromXContent, + listener, + Collections.emptySet()); + } + + /** + * Deletes Machine Learning Job Forecasts + * + *
+ * <p>
+ * For additional info
+ * see the ML Delete Forecast documentation
+ * </p>
+ * + * @param request the {@link DeleteForecastRequest} object enclosing the desired jobId, forecastIDs, and other options + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return a AcknowledgedResponse object indicating request success + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public AcknowledgedResponse deleteForecast(DeleteForecastRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::deleteForecast, + options, + AcknowledgedResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Deletes Machine Learning Job Forecasts asynchronously + * + *
+ * <p>
+ * For additional info
+ * see the ML Delete Forecast documentation
+ * </p>
+ * + * @param request the {@link DeleteForecastRequest} object enclosing the desired jobId, forecastIDs, and other options + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void deleteForecastAsync(DeleteForecastRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::deleteForecast, + options, + AcknowledgedResponse::fromXContent, + listener, + Collections.emptySet()); + } + /** * Gets the buckets for a Machine Learning Job. *
* <p>
@@ -474,6 +570,45 @@ public void getBucketsAsync(GetBucketsRequest request, RequestOptions options, A Collections.emptySet()); } + /** + * Gets the categories for a Machine Learning Job. + *
+ * <p>
+ * For additional info + * see + * ML GET categories documentation + * + * @param request The request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public GetCategoriesResponse getCategories(GetCategoriesRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::getCategories, + options, + GetCategoriesResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Gets the categories for a Machine Learning Job, notifies listener once the requested buckets are retrieved. + *
+ * <p>
+ * For additional info + * see + * ML GET categories documentation + * + * @param request The request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void getCategoriesAsync(GetCategoriesRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::getCategories, + options, + GetCategoriesResponse::fromXContent, + listener, + Collections.emptySet()); + } + /** * Gets overall buckets for a set of Machine Learning Jobs. *
* <p>
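(Editor's note: before the new request classes below, a hedged usage sketch of the client methods added above. This is not part of the patch; the RestHighLevelClient instance, the index name, and the job/datafeed IDs are hypothetical, while every method called is one introduced in this diff.)

import java.io.IOException;

import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.ml.DeleteForecastRequest;
import org.elasticsearch.client.ml.GetCategoriesRequest;
import org.elasticsearch.client.ml.GetCategoriesResponse;
import org.elasticsearch.client.ml.PutDatafeedRequest;
import org.elasticsearch.client.ml.PutDatafeedResponse;
import org.elasticsearch.client.ml.datafeed.DatafeedConfig;
import org.elasticsearch.client.ml.job.util.PageParams;

class MlClientUsageSketch {
    static void demo(RestHighLevelClient client) throws IOException {
        // Create a datafeed that feeds the (assumed) existing job "my-job".
        DatafeedConfig datafeed = DatafeedConfig.builder("my-feed", "my-job")
                .setIndices("my-index")
                .build();
        PutDatafeedResponse putResponse = client.machineLearning()
                .putDatafeed(new PutDatafeedRequest(datafeed), RequestOptions.DEFAULT);

        // Delete all forecasts of the job without failing when none exist.
        DeleteForecastRequest deleteRequest = DeleteForecastRequest.deleteAllForecasts("my-job");
        deleteRequest.setAllowNoForecasts(true);
        AcknowledgedResponse acked = client.machineLearning()
                .deleteForecast(deleteRequest, RequestOptions.DEFAULT);

        // Page through the first 100 categories computed for the job.
        GetCategoriesRequest categoriesRequest = new GetCategoriesRequest("my-job");
        categoriesRequest.setPageParams(new PageParams(0, 100));
        GetCategoriesResponse categories = client.machineLearning()
                .getCategories(categoriesRequest, RequestOptions.DEFAULT);
    }
}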
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteForecastRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteForecastRequest.java new file mode 100644 index 0000000000000..f7c8a6c0733f8 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteForecastRequest.java @@ -0,0 +1,183 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +/** + * POJO for a delete forecast request + */ +public class DeleteForecastRequest extends ActionRequest implements ToXContentObject { + + public static final ParseField FORECAST_ID = new ParseField("forecast_id"); + public static final ParseField ALLOW_NO_FORECASTS = new ParseField("allow_no_forecasts"); + public static final ParseField TIMEOUT = new ParseField("timeout"); + public static final String ALL = "_all"; + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("delete_forecast_request", (a) -> new DeleteForecastRequest((String) a[0])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID); + PARSER.declareStringOrNull( + (c, p) -> c.setForecastIds(Strings.commaDelimitedListToStringArray(p)), FORECAST_ID); + PARSER.declareBoolean(DeleteForecastRequest::setAllowNoForecasts, ALLOW_NO_FORECASTS); + PARSER.declareString(DeleteForecastRequest::timeout, TIMEOUT); + } + + /** + * Create a new {@link DeleteForecastRequest} that explicitly deletes all forecasts + * + * @param jobId the jobId of the Job whose forecasts to delete + */ + public static DeleteForecastRequest deleteAllForecasts(String jobId) { + DeleteForecastRequest request = new DeleteForecastRequest(jobId); + request.setForecastIds(ALL); + return request; + } + + private final String jobId; + private List forecastIds = new ArrayList<>(); + private Boolean allowNoForecasts; + private TimeValue timeout; + + /** + * Create a new DeleteForecastRequest for the given Job ID + * + * @param jobId the jobId of the Job whose forecast(s) to delete + */ + public DeleteForecastRequest(String jobId) { + this.jobId = Objects.requireNonNull(jobId, 
Job.ID.getPreferredName()); + } + + public String getJobId() { + return jobId; + } + + public List getForecastIds() { + return forecastIds; + } + + /** + * The forecast IDs to delete. Can be also be {@link DeleteForecastRequest#ALL} to explicitly delete ALL forecasts + * + * @param forecastIds forecast IDs to delete + */ + public void setForecastIds(String... forecastIds) { + setForecastIds(Arrays.asList(forecastIds)); + } + + void setForecastIds(List forecastIds) { + if (forecastIds.stream().anyMatch(Objects::isNull)) { + throw new NullPointerException("forecastIds must not contain null values"); + } + this.forecastIds = new ArrayList<>(forecastIds); + } + + public Boolean isAllowNoForecasts() { + return allowNoForecasts; + } + + /** + * Sets the `allow_no_forecasts` field. + * + * @param allowNoForecasts when {@code true} no error is thrown when {@link DeleteForecastRequest#ALL} does not find any forecasts + */ + public void setAllowNoForecasts(boolean allowNoForecasts) { + this.allowNoForecasts = allowNoForecasts; + } + + /** + * Allows to set the timeout + * @param timeout timeout as a string (e.g. 1s) + */ + public void timeout(String timeout) { + this.timeout = TimeValue.parseTimeValue(timeout, this.timeout, getClass().getSimpleName() + ".timeout"); + } + + /** + * Allows to set the timeout + * @param timeout timeout as a {@link TimeValue} + */ + public void timeout(TimeValue timeout) { + this.timeout = timeout; + } + + public TimeValue timeout() { + return timeout; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + DeleteForecastRequest that = (DeleteForecastRequest) other; + return Objects.equals(jobId, that.jobId) && + Objects.equals(forecastIds, that.forecastIds) && + Objects.equals(allowNoForecasts, that.allowNoForecasts) && + Objects.equals(timeout, that.timeout); + } + + @Override + public int hashCode() { + return Objects.hash(jobId, forecastIds, allowNoForecasts, timeout); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + if (forecastIds != null) { + builder.field(FORECAST_ID.getPreferredName(), Strings.collectionToCommaDelimitedString(forecastIds)); + } + if (allowNoForecasts != null) { + builder.field(ALLOW_NO_FORECASTS.getPreferredName(), allowNoForecasts); + } + if (timeout != null) { + builder.field(TIMEOUT.getPreferredName(), timeout.getStringRep()); + } + builder.endObject(); + return builder; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCategoriesRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCategoriesRequest.java new file mode 100644 index 0000000000000..4fc68793f0060 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCategoriesRequest.java @@ -0,0 +1,128 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.util.PageParams; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * A request to retrieve categories of a given job + */ +public class GetCategoriesRequest extends ActionRequest implements ToXContentObject { + + + public static final ParseField CATEGORY_ID = new ParseField("category_id"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "get_categories_request", a -> new GetCategoriesRequest((String) a[0])); + + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID); + PARSER.declareLong(GetCategoriesRequest::setCategoryId, CATEGORY_ID); + PARSER.declareObject(GetCategoriesRequest::setPageParams, PageParams.PARSER, PageParams.PAGE); + } + + private final String jobId; + private Long categoryId; + private PageParams pageParams; + + /** + * Constructs a request to retrieve category information from a given job + * @param jobId id of the job from which to retrieve results + */ + public GetCategoriesRequest(String jobId) { + this.jobId = Objects.requireNonNull(jobId); + } + + public String getJobId() { + return jobId; + } + + public PageParams getPageParams() { + return pageParams; + } + + public Long getCategoryId() { + return categoryId; + } + + /** + * Sets the category id + * @param categoryId the category id + */ + public void setCategoryId(Long categoryId) { + this.categoryId = categoryId; + } + + /** + * Sets the paging parameters + * @param pageParams the paging parameters + */ + public void setPageParams(PageParams pageParams) { + this.pageParams = pageParams; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + if (categoryId != null) { + builder.field(CATEGORY_ID.getPreferredName(), categoryId); + } + if (pageParams != null) { + builder.field(PageParams.PAGE.getPreferredName(), pageParams); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + GetCategoriesRequest request = (GetCategoriesRequest) obj; + return Objects.equals(jobId, request.jobId) + && Objects.equals(categoryId, request.categoryId) + && Objects.equals(pageParams, request.pageParams); + } + + @Override + public int hashCode() { + return Objects.hash(jobId, categoryId, pageParams); + } +} diff --git 
a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCategoriesResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCategoriesResponse.java new file mode 100644 index 0000000000000..3d3abe00bfb62 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCategoriesResponse.java @@ -0,0 +1,79 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.job.results.CategoryDefinition; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +/** + * A response containing the requested categories + */ +public class GetCategoriesResponse extends AbstractResultResponse { + + public static final ParseField CATEGORIES = new ParseField("categories"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("get_categories_response", true, + a -> new GetCategoriesResponse((List) a[0], (long) a[1])); + + static { + PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), CategoryDefinition.PARSER, CATEGORIES); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), COUNT); + } + + public static GetCategoriesResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + GetCategoriesResponse(List categories, long count) { + super(CATEGORIES, categories, count); + } + + /** + * The retrieved categories + * @return the retrieved categories + */ + public List categories() { + return results; + } + + @Override + public int hashCode() { + return Objects.hash(count, results); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + GetCategoriesResponse other = (GetCategoriesResponse) obj; + return count == other.count && Objects.equals(results, other.results); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutDatafeedRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutDatafeedRequest.java new file mode 100644 index 0000000000000..34cb12599a612 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutDatafeedRequest.java @@ -0,0 +1,84 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.datafeed.DatafeedConfig; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Request to create a new Machine Learning Datafeed given a {@link DatafeedConfig} configuration + */ +public class PutDatafeedRequest extends ActionRequest implements ToXContentObject { + + private final DatafeedConfig datafeed; + + /** + * Construct a new PutDatafeedRequest + * + * @param datafeed a {@link DatafeedConfig} configuration to create + */ + public PutDatafeedRequest(DatafeedConfig datafeed) { + this.datafeed = datafeed; + } + + public DatafeedConfig getDatafeed() { + return datafeed; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return datafeed.toXContent(builder, params); + } + + @Override + public boolean equals(Object object) { + if (this == object) { + return true; + } + + if (object == null || getClass() != object.getClass()) { + return false; + } + + PutDatafeedRequest request = (PutDatafeedRequest) object; + return Objects.equals(datafeed, request.datafeed); + } + + @Override + public int hashCode() { + return Objects.hash(datafeed); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutDatafeedResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutDatafeedResponse.java new file mode 100644 index 0000000000000..fa9862fd3b978 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutDatafeedResponse.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.datafeed.DatafeedConfig; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +/** + * Response containing the newly created {@link DatafeedConfig} + */ +public class PutDatafeedResponse implements ToXContentObject { + + private DatafeedConfig datafeed; + + public static PutDatafeedResponse fromXContent(XContentParser parser) throws IOException { + return new PutDatafeedResponse(DatafeedConfig.PARSER.parse(parser, null).build()); + } + + PutDatafeedResponse(DatafeedConfig datafeed) { + this.datafeed = datafeed; + } + + public DatafeedConfig getResponse() { + return datafeed; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + datafeed.toXContent(builder, params); + return builder; + } + + @Override + public boolean equals(Object object) { + if (this == object) { + return true; + } + if (object == null || getClass() != object.getClass()) { + return false; + } + PutDatafeedResponse response = (PutDatafeedResponse) object; + return Objects.equals(datafeed, response.datafeed); + } + + @Override + public int hashCode() { + return Objects.hash(datafeed); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedConfig.java index 752752b103885..84deae61f8e62 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedConfig.java @@ -20,36 +20,37 @@ import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.builder.SearchSourceBuilder; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.List; +import java.util.Map; import java.util.Objects; /** - * Datafeed configuration options pojo. Describes where to proactively pull input - * data from. - *
- * <p>
- * If a value has not been set it will be null. Object wrappers are - * used around integral types and booleans so they can take null - * values. + * The datafeed configuration object. It specifies which indices + * to get the data from and offers parameters for customizing different + * aspects of the process. */ public class DatafeedConfig implements ToXContentObject { - public static final int DEFAULT_SCROLL_SIZE = 1000; - public static final ParseField ID = new ParseField("datafeed_id"); public static final ParseField QUERY_DELAY = new ParseField("query_delay"); public static final ParseField FREQUENCY = new ParseField("frequency"); @@ -59,7 +60,6 @@ public class DatafeedConfig implements ToXContentObject { public static final ParseField QUERY = new ParseField("query"); public static final ParseField SCROLL_SIZE = new ParseField("scroll_size"); public static final ParseField AGGREGATIONS = new ParseField("aggregations"); - public static final ParseField AGGS = new ParseField("aggs"); public static final ParseField SCRIPT_FIELDS = new ParseField("script_fields"); public static final ParseField CHUNKING_CONFIG = new ParseField("chunking_config"); @@ -77,9 +77,8 @@ public class DatafeedConfig implements ToXContentObject { builder.setQueryDelay(TimeValue.parseTimeValue(val, QUERY_DELAY.getPreferredName())), QUERY_DELAY); PARSER.declareString((builder, val) -> builder.setFrequency(TimeValue.parseTimeValue(val, FREQUENCY.getPreferredName())), FREQUENCY); - PARSER.declareObject(Builder::setQuery, (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), QUERY); - PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGREGATIONS); - PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGS); + PARSER.declareField(Builder::setQuery, DatafeedConfig::parseBytes, QUERY, ObjectParser.ValueType.OBJECT); + PARSER.declareField(Builder::setAggregations, DatafeedConfig::parseBytes, AGGREGATIONS, ObjectParser.ValueType.OBJECT); PARSER.declareObject(Builder::setScriptFields, (p, c) -> { List parsedScriptFields = new ArrayList<>(); while (p.nextToken() != XContentParser.Token.END_OBJECT) { @@ -91,29 +90,26 @@ public class DatafeedConfig implements ToXContentObject { PARSER.declareObject(Builder::setChunkingConfig, ChunkingConfig.PARSER, CHUNKING_CONFIG); } + private static BytesReference parseBytes(XContentParser parser) throws IOException { + XContentBuilder contentBuilder = JsonXContent.contentBuilder(); + contentBuilder.generator().copyCurrentStructure(parser); + return BytesReference.bytes(contentBuilder); + } + private final String id; private final String jobId; - - /** - * The delay before starting to query a period of time - */ private final TimeValue queryDelay; - - /** - * The frequency with which queries are executed - */ private final TimeValue frequency; - private final List indices; private final List types; - private final QueryBuilder query; - private final AggregatorFactories.Builder aggregations; + private final BytesReference query; + private final BytesReference aggregations; private final List scriptFields; private final Integer scrollSize; private final ChunkingConfig chunkingConfig; private DatafeedConfig(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List indices, List types, - QueryBuilder query, AggregatorFactories.Builder aggregations, List scriptFields, + BytesReference query, BytesReference aggregations, List scriptFields, Integer scrollSize, ChunkingConfig 
chunkingConfig) { this.id = id; this.jobId = jobId; @@ -156,11 +152,11 @@ public Integer getScrollSize() { return scrollSize; } - public QueryBuilder getQuery() { + public BytesReference getQuery() { return query; } - public AggregatorFactories.Builder getAggregations() { + public BytesReference getAggregations() { return aggregations; } @@ -183,11 +179,17 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (frequency != null) { builder.field(FREQUENCY.getPreferredName(), frequency.getStringRep()); } - builder.field(INDICES.getPreferredName(), indices); - builder.field(TYPES.getPreferredName(), types); - builder.field(QUERY.getPreferredName(), query); + if (indices != null) { + builder.field(INDICES.getPreferredName(), indices); + } + if (types != null) { + builder.field(TYPES.getPreferredName(), types); + } + if (query != null) { + builder.field(QUERY.getPreferredName(), asMap(query)); + } if (aggregations != null) { - builder.field(AGGREGATIONS.getPreferredName(), aggregations); + builder.field(AGGREGATIONS.getPreferredName(), asMap(aggregations)); } if (scriptFields != null) { builder.startObject(SCRIPT_FIELDS.getPreferredName()); @@ -196,7 +198,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } builder.endObject(); } - builder.field(SCROLL_SIZE.getPreferredName(), scrollSize); + if (scrollSize != null) { + builder.field(SCROLL_SIZE.getPreferredName(), scrollSize); + } if (chunkingConfig != null) { builder.field(CHUNKING_CONFIG.getPreferredName(), chunkingConfig); } @@ -205,10 +209,18 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + private static Map asMap(BytesReference bytesReference) { + return bytesReference == null ? null : XContentHelper.convertToMap(bytesReference, true, XContentType.JSON).v2(); + } + /** * The lists of indices and types are compared for equality but they are not * sorted first so this test could fail simply because the indices and types * lists are in different orders. + * + * Also note this could be a heavy operation when a query or aggregations + * are set as we need to convert the bytes references into maps to correctly + * compare them. */ @Override public boolean equals(Object other) { @@ -228,31 +240,40 @@ public boolean equals(Object other) { && Objects.equals(this.queryDelay, that.queryDelay) && Objects.equals(this.indices, that.indices) && Objects.equals(this.types, that.types) - && Objects.equals(this.query, that.query) + && Objects.equals(asMap(this.query), asMap(that.query)) && Objects.equals(this.scrollSize, that.scrollSize) - && Objects.equals(this.aggregations, that.aggregations) + && Objects.equals(asMap(this.aggregations), asMap(that.aggregations)) && Objects.equals(this.scriptFields, that.scriptFields) && Objects.equals(this.chunkingConfig, that.chunkingConfig); } + /** + * Note this could be a heavy operation when a query or aggregations + * are set as we need to convert the bytes references into maps to + * compute a stable hash code. 
+ */ @Override public int hashCode() { - return Objects.hash(id, jobId, frequency, queryDelay, indices, types, query, scrollSize, aggregations, scriptFields, + return Objects.hash(id, jobId, frequency, queryDelay, indices, types, asMap(query), scrollSize, asMap(aggregations), scriptFields, chunkingConfig); } + public static Builder builder(String id, String jobId) { + return new Builder(id, jobId); + } + public static class Builder { private String id; private String jobId; private TimeValue queryDelay; private TimeValue frequency; - private List indices = Collections.emptyList(); - private List types = Collections.emptyList(); - private QueryBuilder query = QueryBuilders.matchAllQuery(); - private AggregatorFactories.Builder aggregations; + private List indices; + private List types; + private BytesReference query; + private BytesReference aggregations; private List scriptFields; - private Integer scrollSize = DEFAULT_SCROLL_SIZE; + private Integer scrollSize; private ChunkingConfig chunkingConfig; public Builder(String id, String jobId) { @@ -279,8 +300,12 @@ public Builder setIndices(List indices) { return this; } + public Builder setIndices(String... indices) { + return setIndices(Arrays.asList(indices)); + } + public Builder setTypes(List types) { - this.types = Objects.requireNonNull(types, TYPES.getPreferredName()); + this.types = types; return this; } @@ -294,16 +319,36 @@ public Builder setFrequency(TimeValue frequency) { return this; } - public Builder setQuery(QueryBuilder query) { - this.query = Objects.requireNonNull(query, QUERY.getPreferredName()); + private Builder setQuery(BytesReference query) { + this.query = query; + return this; + } + + public Builder setQuery(String queryAsJson) { + this.query = queryAsJson == null ? null : new BytesArray(queryAsJson); + return this; + } + + public Builder setQuery(QueryBuilder query) throws IOException { + this.query = query == null ? null : xContentToBytes(query); return this; } - public Builder setAggregations(AggregatorFactories.Builder aggregations) { + private Builder setAggregations(BytesReference aggregations) { this.aggregations = aggregations; return this; } + public Builder setAggregations(String aggsAsJson) { + this.aggregations = aggsAsJson == null ? null : new BytesArray(aggsAsJson); + return this; + } + + public Builder setAggregations(AggregatorFactories.Builder aggregations) throws IOException { + this.aggregations = aggregations == null ? 
null : xContentToBytes(aggregations); + return this; + } + public Builder setScriptFields(List scriptFields) { List sorted = new ArrayList<>(scriptFields); sorted.sort(Comparator.comparing(SearchSourceBuilder.ScriptField::fieldName)); @@ -325,5 +370,12 @@ public DatafeedConfig build() { return new DatafeedConfig(id, jobId, queryDelay, frequency, indices, types, query, aggregations, scriptFields, scrollSize, chunkingConfig); } + + private static BytesReference xContentToBytes(ToXContentObject object) throws IOException { + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + object.toXContent(builder, ToXContentObject.EMPTY_PARAMS); + return BytesReference.bytes(builder); + } + } } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdate.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdate.java index 184d5d51481fa..1e59ea067ca7b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdate.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdate.java @@ -20,12 +20,17 @@ import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -35,6 +40,7 @@ import java.util.Collections; import java.util.Comparator; import java.util.List; +import java.util.Map; import java.util.Objects; /** @@ -58,11 +64,9 @@ public class DatafeedUpdate implements ToXContentObject { TimeValue.parseTimeValue(val, DatafeedConfig.QUERY_DELAY.getPreferredName())), DatafeedConfig.QUERY_DELAY); PARSER.declareString((builder, val) -> builder.setFrequency( TimeValue.parseTimeValue(val, DatafeedConfig.FREQUENCY.getPreferredName())), DatafeedConfig.FREQUENCY); - PARSER.declareObject(Builder::setQuery, (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), DatafeedConfig.QUERY); - PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), - DatafeedConfig.AGGREGATIONS); - PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), - DatafeedConfig.AGGS); + PARSER.declareField(Builder::setQuery, DatafeedUpdate::parseBytes, DatafeedConfig.QUERY, ObjectParser.ValueType.OBJECT); + PARSER.declareField(Builder::setAggregations, DatafeedUpdate::parseBytes, DatafeedConfig.AGGREGATIONS, + ObjectParser.ValueType.OBJECT); PARSER.declareObject(Builder::setScriptFields, (p, c) -> { List parsedScriptFields = new ArrayList<>(); while (p.nextToken() != XContentParser.Token.END_OBJECT) { @@ -74,20 +78,26 @@ public class DatafeedUpdate implements ToXContentObject { 
PARSER.declareObject(Builder::setChunkingConfig, ChunkingConfig.PARSER, DatafeedConfig.CHUNKING_CONFIG); } + private static BytesReference parseBytes(XContentParser parser) throws IOException { + XContentBuilder contentBuilder = JsonXContent.contentBuilder(); + contentBuilder.generator().copyCurrentStructure(parser); + return BytesReference.bytes(contentBuilder); + } + private final String id; private final String jobId; private final TimeValue queryDelay; private final TimeValue frequency; private final List indices; private final List types; - private final QueryBuilder query; - private final AggregatorFactories.Builder aggregations; + private final BytesReference query; + private final BytesReference aggregations; private final List scriptFields; private final Integer scrollSize; private final ChunkingConfig chunkingConfig; private DatafeedUpdate(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List indices, List types, - QueryBuilder query, AggregatorFactories.Builder aggregations, List scriptFields, + BytesReference query, BytesReference aggregations, List scriptFields, Integer scrollSize, ChunkingConfig chunkingConfig) { this.id = id; this.jobId = jobId; @@ -121,9 +131,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(DatafeedConfig.FREQUENCY.getPreferredName(), frequency.getStringRep()); } addOptionalField(builder, DatafeedConfig.INDICES, indices); + if (query != null) { + builder.field(DatafeedConfig.QUERY.getPreferredName(), asMap(query)); + } + if (aggregations != null) { + builder.field(DatafeedConfig.AGGREGATIONS.getPreferredName(), asMap(aggregations)); + } addOptionalField(builder, DatafeedConfig.TYPES, types); - addOptionalField(builder, DatafeedConfig.QUERY, query); - addOptionalField(builder, DatafeedConfig.AGGREGATIONS, aggregations); if (scriptFields != null) { builder.startObject(DatafeedConfig.SCRIPT_FIELDS.getPreferredName()); for (SearchSourceBuilder.ScriptField scriptField : scriptFields) { @@ -167,11 +181,11 @@ public Integer getScrollSize() { return scrollSize; } - public QueryBuilder getQuery() { + public BytesReference getQuery() { return query; } - public AggregatorFactories.Builder getAggregations() { + public BytesReference getAggregations() { return aggregations; } @@ -183,10 +197,18 @@ public ChunkingConfig getChunkingConfig() { return chunkingConfig; } + private static Map asMap(BytesReference bytesReference) { + return bytesReference == null ? null : XContentHelper.convertToMap(bytesReference, true, XContentType.JSON).v2(); + } + /** * The lists of indices and types are compared for equality but they are not * sorted first so this test could fail simply because the indices and types * lists are in different orders. + * + * Also note this could be a heavy operation when a query or aggregations + * are set as we need to convert the bytes references into maps to correctly + * compare them. 
*/ @Override public boolean equals(Object other) { @@ -206,19 +228,28 @@ public boolean equals(Object other) { && Objects.equals(this.queryDelay, that.queryDelay) && Objects.equals(this.indices, that.indices) && Objects.equals(this.types, that.types) - && Objects.equals(this.query, that.query) + && Objects.equals(asMap(this.query), asMap(that.query)) && Objects.equals(this.scrollSize, that.scrollSize) - && Objects.equals(this.aggregations, that.aggregations) + && Objects.equals(asMap(this.aggregations), asMap(that.aggregations)) && Objects.equals(this.scriptFields, that.scriptFields) && Objects.equals(this.chunkingConfig, that.chunkingConfig); } + /** + * Note this could be a heavy operation when a query or aggregations + * are set as we need to convert the bytes references into maps to + * compute a stable hash code. + */ @Override public int hashCode() { - return Objects.hash(id, jobId, frequency, queryDelay, indices, types, query, scrollSize, aggregations, scriptFields, + return Objects.hash(id, jobId, frequency, queryDelay, indices, types, asMap(query), scrollSize, asMap(aggregations), scriptFields, chunkingConfig); } + public static Builder builder(String id) { + return new Builder(id); + } + public static class Builder { private String id; @@ -227,8 +258,8 @@ public static class Builder { private TimeValue frequency; private List indices; private List types; - private QueryBuilder query; - private AggregatorFactories.Builder aggregations; + private BytesReference query; + private BytesReference aggregations; private List scriptFields; private Integer scrollSize; private ChunkingConfig chunkingConfig; @@ -276,16 +307,36 @@ public Builder setFrequency(TimeValue frequency) { return this; } - public Builder setQuery(QueryBuilder query) { + private Builder setQuery(BytesReference query) { this.query = query; return this; } - public Builder setAggregations(AggregatorFactories.Builder aggregations) { + public Builder setQuery(String queryAsJson) { + this.query = queryAsJson == null ? null : new BytesArray(queryAsJson); + return this; + } + + public Builder setQuery(QueryBuilder query) throws IOException { + this.query = query == null ? null : xContentToBytes(query); + return this; + } + + private Builder setAggregations(BytesReference aggregations) { this.aggregations = aggregations; return this; } + public Builder setAggregations(String aggsAsJson) { + this.aggregations = aggsAsJson == null ? null : new BytesArray(aggsAsJson); + return this; + } + + public Builder setAggregations(AggregatorFactories.Builder aggregations) throws IOException { + this.aggregations = aggregations == null ? 
null : xContentToBytes(aggregations); + return this; + } + public Builder setScriptFields(List scriptFields) { List sorted = new ArrayList<>(scriptFields); sorted.sort(Comparator.comparing(SearchSourceBuilder.ScriptField::fieldName)); @@ -307,5 +358,12 @@ public DatafeedUpdate build() { return new DatafeedUpdate(id, jobId, queryDelay, frequency, indices, types, query, aggregations, scriptFields, scrollSize, chunkingConfig); } + + private static BytesReference xContentToBytes(ToXContentObject object) throws IOException { + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + object.toXContent(builder, ToXContentObject.EMPTY_PARAMS); + return BytesReference.bytes(builder); + } + } } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java index 26e6251af48d0..19db672e35bcc 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java @@ -24,10 +24,12 @@ import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.elasticsearch.client.ml.CloseJobRequest; +import org.elasticsearch.client.ml.DeleteForecastRequest; import org.elasticsearch.client.ml.DeleteJobRequest; import org.elasticsearch.client.ml.FlushJobRequest; import org.elasticsearch.client.ml.ForecastJobRequest; import org.elasticsearch.client.ml.GetBucketsRequest; +import org.elasticsearch.client.ml.GetCategoriesRequest; import org.elasticsearch.client.ml.GetInfluencersRequest; import org.elasticsearch.client.ml.GetJobRequest; import org.elasticsearch.client.ml.GetJobStatsRequest; @@ -35,14 +37,18 @@ import org.elasticsearch.client.ml.GetRecordsRequest; import org.elasticsearch.client.ml.OpenJobRequest; import org.elasticsearch.client.ml.PostDataRequest; +import org.elasticsearch.client.ml.PutDatafeedRequest; import org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.client.ml.UpdateJobRequest; +import org.elasticsearch.client.ml.datafeed.DatafeedConfig; +import org.elasticsearch.client.ml.datafeed.DatafeedConfigTests; import org.elasticsearch.client.ml.job.config.AnalysisConfig; import org.elasticsearch.client.ml.job.config.Detector; import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.client.ml.job.config.JobUpdate; import org.elasticsearch.client.ml.job.config.JobUpdateTests; import org.elasticsearch.client.ml.job.util.PageParams; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; @@ -203,6 +209,47 @@ public void testUpdateJob() throws Exception { } } + public void testPutDatafeed() throws IOException { + DatafeedConfig datafeed = DatafeedConfigTests.createRandom(); + PutDatafeedRequest putDatafeedRequest = new PutDatafeedRequest(datafeed); + + Request request = MLRequestConverters.putDatafeed(putDatafeedRequest); + + assertEquals(HttpPut.METHOD_NAME, request.getMethod()); + assertThat(request.getEndpoint(), equalTo("/_xpack/ml/datafeeds/" + datafeed.getId())); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { + DatafeedConfig parsedDatafeed = DatafeedConfig.PARSER.apply(parser, null).build(); + assertThat(parsedDatafeed, equalTo(datafeed)); + } + } + 
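// Editorial aside, not part of the patch: with query and aggregations now held
// as BytesReference, a DatafeedConfig can be built from raw JSON strings without
// QueryBuilders or AggregatorFactories. A minimal sketch; the feed/job ids,
// index pattern, and JSON bodies below are illustrative only.
DatafeedConfig sketch = DatafeedConfig.builder("feed-1", "job-1")
        .setIndices("logs-*")
        .setQuery("{\"term\": {\"level\": {\"value\": \"error\"}}}")
        .setAggregations("{\"hourly\": {\"date_histogram\": {\"field\": \"timestamp\", \"interval\": \"1h\"}}}")
        .build();
// The bytes are kept verbatim; they are converted to maps only for
// equals()/hashCode() and when rendering XContent, as noted in the class javadoc.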
+ public void testDeleteForecast() throws Exception { + String jobId = randomAlphaOfLength(10); + DeleteForecastRequest deleteForecastRequest = new DeleteForecastRequest(jobId); + + Request request = MLRequestConverters.deleteForecast(deleteForecastRequest); + assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_forecast", request.getEndpoint()); + assertFalse(request.getParameters().containsKey("timeout")); + assertFalse(request.getParameters().containsKey("allow_no_forecasts")); + + deleteForecastRequest.setForecastIds(randomAlphaOfLength(10), randomAlphaOfLength(10)); + deleteForecastRequest.timeout("10s"); + deleteForecastRequest.setAllowNoForecasts(true); + + request = MLRequestConverters.deleteForecast(deleteForecastRequest); + assertEquals( + "/_xpack/ml/anomaly_detectors/" + + jobId + + "/_forecast/" + + Strings.collectionToCommaDelimitedString(deleteForecastRequest.getForecastIds()), + request.getEndpoint()); + assertEquals("10s", + request.getParameters().get(DeleteForecastRequest.TIMEOUT.getPreferredName())); + assertEquals(Boolean.toString(true), + request.getParameters().get(DeleteForecastRequest.ALLOW_NO_FORECASTS.getPreferredName())); + } + public void testGetBuckets() throws IOException { String jobId = randomAlphaOfLength(10); GetBucketsRequest getBucketsRequest = new GetBucketsRequest(jobId); @@ -220,6 +267,21 @@ public void testGetBuckets() throws IOException { } } + public void testGetCategories() throws IOException { + String jobId = randomAlphaOfLength(10); + GetCategoriesRequest getCategoriesRequest = new GetCategoriesRequest(jobId); + getCategoriesRequest.setPageParams(new PageParams(100, 300)); + + + Request request = MLRequestConverters.getCategories(getCategoriesRequest); + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/results/categories", request.getEndpoint()); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { + GetCategoriesRequest parsedRequest = GetCategoriesRequest.PARSER.apply(parser, null); + assertThat(parsedRequest, equalTo(getCategoriesRequest)); + } + } + public void testGetOverallBuckets() throws IOException { String jobId = randomAlphaOfLength(10); GetOverallBucketsRequest getOverallBucketsRequest = new GetOverallBucketsRequest(jobId); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java index 40d8596d1ba86..ddaec64157381 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java @@ -23,6 +23,8 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.ml.GetBucketsRequest; import org.elasticsearch.client.ml.GetBucketsResponse; +import org.elasticsearch.client.ml.GetCategoriesRequest; +import org.elasticsearch.client.ml.GetCategoriesResponse; import org.elasticsearch.client.ml.GetInfluencersRequest; import org.elasticsearch.client.ml.GetInfluencersResponse; import org.elasticsearch.client.ml.GetOverallBucketsRequest; @@ -126,11 +128,150 @@ private void addRecordIndexRequest(long timestamp, boolean isInterim, BulkReques bulkRequest.add(indexRequest); } + private void addCategoryIndexRequest(long categoryId, String categoryName, 
            BulkRequest bulkRequest) {
+        IndexRequest indexRequest = new IndexRequest(RESULTS_INDEX, DOC);
+        indexRequest.source("{\"job_id\":\"" + JOB_ID + "\", \"category_id\": " + categoryId + ", \"terms\": \"" +
+                categoryName + "\", \"regex\": \".*?" + categoryName + ".*\", \"max_matching_length\": 3, \"examples\": [\"" +
+                categoryName + "\"]}", XContentType.JSON);
+        bulkRequest.add(indexRequest);
+    }
+
+    private void addCategoriesIndexRequests(BulkRequest bulkRequest) {
+
+        List<String> categories = Arrays.asList("AAL", "JZA", "JBU");
+
+        for (int i = 0; i < categories.size(); i++) {
+            addCategoryIndexRequest(i+1, categories.get(i), bulkRequest);
+        }
+    }
+
    @After
    public void deleteJob() throws IOException {
        new MlRestTestStateCleaner(logger, client()).clearMlMetadata();
    }

+    public void testGetCategories() throws IOException {
+
+        // index some category results
+        BulkRequest bulkRequest = new BulkRequest();
+        bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
+
+        addCategoriesIndexRequests(bulkRequest);
+
+        highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT);
+
+        MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
+
+        {
+            GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID);
+            request.setPageParams(new PageParams(0, 10000));
+
+            GetCategoriesResponse response = execute(request, machineLearningClient::getCategories,
+                    machineLearningClient::getCategoriesAsync);
+
+            assertThat(response.count(), equalTo(3L));
+            assertThat(response.categories().size(), equalTo(3));
+            assertThat(response.categories().get(0).getCategoryId(), equalTo(1L));
+            assertThat(response.categories().get(0).getGrokPattern(), equalTo(".*?AAL.*"));
+            assertThat(response.categories().get(0).getRegex(), equalTo(".*?AAL.*"));
+            assertThat(response.categories().get(0).getTerms(), equalTo("AAL"));
+
+            assertThat(response.categories().get(1).getCategoryId(), equalTo(2L));
+            assertThat(response.categories().get(1).getGrokPattern(), equalTo(".*?JZA.*"));
+            assertThat(response.categories().get(1).getRegex(), equalTo(".*?JZA.*"));
+            assertThat(response.categories().get(1).getTerms(), equalTo("JZA"));
+
+            assertThat(response.categories().get(2).getCategoryId(), equalTo(3L));
+            assertThat(response.categories().get(2).getGrokPattern(), equalTo(".*?JBU.*"));
+            assertThat(response.categories().get(2).getRegex(), equalTo(".*?JBU.*"));
+            assertThat(response.categories().get(2).getTerms(), equalTo("JBU"));
+        }
+        {
+            GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID);
+            request.setPageParams(new PageParams(0, 1));
+
+            GetCategoriesResponse response = execute(request, machineLearningClient::getCategories,
+                    machineLearningClient::getCategoriesAsync);
+
+            assertThat(response.count(), equalTo(3L));
+            assertThat(response.categories().size(), equalTo(1));
+            assertThat(response.categories().get(0).getCategoryId(), equalTo(1L));
+            assertThat(response.categories().get(0).getGrokPattern(), equalTo(".*?AAL.*"));
+            assertThat(response.categories().get(0).getRegex(), equalTo(".*?AAL.*"));
+            assertThat(response.categories().get(0).getTerms(), equalTo("AAL"));
+        }
+        {
+            GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID);
+            request.setPageParams(new PageParams(1, 2));
+
+            GetCategoriesResponse response = execute(request, machineLearningClient::getCategories,
+                    machineLearningClient::getCategoriesAsync);
+
+            assertThat(response.count(), equalTo(3L));
+            assertThat(response.categories().size(), equalTo(2));
+            assertThat(response.categories().get(0).getCategoryId(), equalTo(2L));
+
assertThat(response.categories().get(0).getGrokPattern(), equalTo(".*?JZA.*")); + assertThat(response.categories().get(0).getRegex(), equalTo(".*?JZA.*")); + assertThat(response.categories().get(0).getTerms(), equalTo("JZA")); + + assertThat(response.categories().get(1).getCategoryId(), equalTo(3L)); + assertThat(response.categories().get(1).getGrokPattern(), equalTo(".*?JBU.*")); + assertThat(response.categories().get(1).getRegex(), equalTo(".*?JBU.*")); + assertThat(response.categories().get(1).getTerms(), equalTo("JBU")); + } + { + GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID); + request.setCategoryId(0L); // request a non-existent category + + GetCategoriesResponse response = execute(request, machineLearningClient::getCategories, + machineLearningClient::getCategoriesAsync); + + assertThat(response.count(), equalTo(0L)); + assertThat(response.categories().size(), equalTo(0)); + } + { + GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID); + request.setCategoryId(1L); + + GetCategoriesResponse response = execute(request, machineLearningClient::getCategories, + machineLearningClient::getCategoriesAsync); + + assertThat(response.count(), equalTo(1L)); + assertThat(response.categories().size(), equalTo(1)); + assertThat(response.categories().get(0).getCategoryId(), equalTo(1L)); + assertThat(response.categories().get(0).getGrokPattern(), equalTo(".*?AAL.*")); + assertThat(response.categories().get(0).getRegex(), equalTo(".*?AAL.*")); + assertThat(response.categories().get(0).getTerms(), equalTo("AAL")); + } + { + GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID); + request.setCategoryId(2L); + + GetCategoriesResponse response = execute(request, machineLearningClient::getCategories, + machineLearningClient::getCategoriesAsync); + + assertThat(response.count(), equalTo(1L)); + assertThat(response.categories().get(0).getCategoryId(), equalTo(2L)); + assertThat(response.categories().get(0).getGrokPattern(), equalTo(".*?JZA.*")); + assertThat(response.categories().get(0).getRegex(), equalTo(".*?JZA.*")); + assertThat(response.categories().get(0).getTerms(), equalTo("JZA")); + + } + { + GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID); + request.setCategoryId(3L); + + GetCategoriesResponse response = execute(request, machineLearningClient::getCategories, + machineLearningClient::getCategoriesAsync); + + assertThat(response.count(), equalTo(1L)); + assertThat(response.categories().get(0).getCategoryId(), equalTo(3L)); + assertThat(response.categories().get(0).getGrokPattern(), equalTo(".*?JBU.*")); + assertThat(response.categories().get(0).getRegex(), equalTo(".*?JBU.*")); + assertThat(response.categories().get(0).getTerms(), equalTo("JBU")); + } + } + public void testGetBuckets() throws IOException { MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index fb715683b2709..c0bf1055058a5 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -20,33 +20,40 @@ import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; import org.elasticsearch.ElasticsearchStatusException; -import org.elasticsearch.client.ml.ForecastJobRequest; -import 
org.elasticsearch.client.ml.ForecastJobResponse; -import org.elasticsearch.client.ml.PostDataRequest; -import org.elasticsearch.client.ml.PostDataResponse; -import org.elasticsearch.client.ml.UpdateJobRequest; -import org.elasticsearch.client.ml.job.config.JobUpdate; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.client.ml.GetJobStatsRequest; -import org.elasticsearch.client.ml.GetJobStatsResponse; -import org.elasticsearch.client.ml.job.config.JobState; -import org.elasticsearch.client.ml.job.stats.JobStats; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.ml.CloseJobRequest; import org.elasticsearch.client.ml.CloseJobResponse; +import org.elasticsearch.client.ml.DeleteForecastRequest; import org.elasticsearch.client.ml.DeleteJobRequest; import org.elasticsearch.client.ml.DeleteJobResponse; +import org.elasticsearch.client.ml.FlushJobRequest; +import org.elasticsearch.client.ml.FlushJobResponse; +import org.elasticsearch.client.ml.ForecastJobRequest; +import org.elasticsearch.client.ml.ForecastJobResponse; import org.elasticsearch.client.ml.GetJobRequest; import org.elasticsearch.client.ml.GetJobResponse; +import org.elasticsearch.client.ml.GetJobStatsRequest; +import org.elasticsearch.client.ml.GetJobStatsResponse; import org.elasticsearch.client.ml.OpenJobRequest; import org.elasticsearch.client.ml.OpenJobResponse; +import org.elasticsearch.client.ml.PostDataRequest; +import org.elasticsearch.client.ml.PostDataResponse; +import org.elasticsearch.client.ml.PutDatafeedRequest; +import org.elasticsearch.client.ml.PutDatafeedResponse; import org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.client.ml.PutJobResponse; +import org.elasticsearch.client.ml.UpdateJobRequest; +import org.elasticsearch.client.ml.datafeed.DatafeedConfig; import org.elasticsearch.client.ml.job.config.AnalysisConfig; import org.elasticsearch.client.ml.job.config.DataDescription; import org.elasticsearch.client.ml.job.config.Detector; import org.elasticsearch.client.ml.job.config.Job; -import org.elasticsearch.client.ml.FlushJobRequest; -import org.elasticsearch.client.ml.FlushJobResponse; +import org.elasticsearch.client.ml.job.config.JobState; +import org.elasticsearch.client.ml.job.config.JobUpdate; +import org.elasticsearch.client.ml.job.stats.JobStats; +import org.elasticsearch.common.unit.TimeValue; import org.junit.After; import java.io.IOException; @@ -288,6 +295,92 @@ public void testUpdateJob() throws Exception { assertEquals("Updated description", getResponse.jobs().get(0).getDescription()); } + public void testPutDatafeed() throws Exception { + String jobId = randomValidJobId(); + Job job = buildJob(jobId); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + execute(new PutJobRequest(job), machineLearningClient::putJob, machineLearningClient::putJobAsync); + + String datafeedId = "datafeed-" + jobId; + DatafeedConfig datafeedConfig = DatafeedConfig.builder(datafeedId, jobId).setIndices("some_data_index").build(); + + PutDatafeedResponse response = execute(new PutDatafeedRequest(datafeedConfig), machineLearningClient::putDatafeed, + machineLearningClient::putDatafeedAsync); + + DatafeedConfig createdDatafeed = response.getResponse(); + assertThat(createdDatafeed.getId(), equalTo(datafeedId)); + assertThat(createdDatafeed.getIndices(), equalTo(datafeedConfig.getIndices())); + } + 
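+    // For illustration only, assuming an already-configured RestHighLevelClient named
+    // `client`: outside the test harness, the equivalent synchronous call is simply
+    //
+    //   PutDatafeedResponse response = client.machineLearning()
+    //       .putDatafeed(new PutDatafeedRequest(datafeedConfig), RequestOptions.DEFAULT);
+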
+    public void testDeleteForecast() throws Exception {
+        String jobId = "test-delete-forecast";
+
+        Job job = buildJob(jobId);
+        MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
+        machineLearningClient.putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
+        machineLearningClient.openJob(new OpenJobRequest(jobId), RequestOptions.DEFAULT);
+
+        Job noForecastsJob = buildJob("test-delete-forecast-none");
+        machineLearningClient.putJob(new PutJobRequest(noForecastsJob), RequestOptions.DEFAULT);
+
+        PostDataRequest.JsonBuilder builder = new PostDataRequest.JsonBuilder();
+        for(int i = 0; i < 30; i++) {
+            Map<String, Object> hashMap = new HashMap<>();
+            hashMap.put("total", randomInt(1000));
+            hashMap.put("timestamp", (i+1)*1000);
+            builder.addDoc(hashMap);
+        }
+
+        PostDataRequest postDataRequest = new PostDataRequest(jobId, builder);
+        machineLearningClient.postData(postDataRequest, RequestOptions.DEFAULT);
+        machineLearningClient.flushJob(new FlushJobRequest(jobId), RequestOptions.DEFAULT);
+        ForecastJobResponse forecastJobResponse1 = machineLearningClient.forecastJob(new ForecastJobRequest(jobId), RequestOptions.DEFAULT);
+        ForecastJobResponse forecastJobResponse2 = machineLearningClient.forecastJob(new ForecastJobRequest(jobId), RequestOptions.DEFAULT);
+        waitForForecastToComplete(jobId, forecastJobResponse1.getForecastId());
+        waitForForecastToComplete(jobId, forecastJobResponse2.getForecastId());
+
+        {
+            DeleteForecastRequest request = new DeleteForecastRequest(jobId);
+            request.setForecastIds(forecastJobResponse1.getForecastId(), forecastJobResponse2.getForecastId());
+            AcknowledgedResponse response = execute(request, machineLearningClient::deleteForecast,
+                    machineLearningClient::deleteForecastAsync);
+            assertTrue(response.isAcknowledged());
+            assertFalse(forecastExists(jobId, forecastJobResponse1.getForecastId()));
+            assertFalse(forecastExists(jobId, forecastJobResponse2.getForecastId()));
+        }
+        {
+            DeleteForecastRequest request = DeleteForecastRequest.deleteAllForecasts(noForecastsJob.getId());
+            request.setAllowNoForecasts(true);
+            AcknowledgedResponse response = execute(request, machineLearningClient::deleteForecast,
+                    machineLearningClient::deleteForecastAsync);
+            assertTrue(response.isAcknowledged());
+        }
+        {
+            DeleteForecastRequest request = DeleteForecastRequest.deleteAllForecasts(noForecastsJob.getId());
+            request.setAllowNoForecasts(false);
+            ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class,
+                    () -> execute(request, machineLearningClient::deleteForecast, machineLearningClient::deleteForecastAsync));
+            assertThat(exception.status().getStatus(), equalTo(404));
+        }
+    }
+
+    private void waitForForecastToComplete(String jobId, String forecastId) throws Exception {
+        GetRequest request = new GetRequest(".ml-anomalies-" + jobId);
+        request.id(jobId + "_model_forecast_request_stats_" + forecastId);
+        assertBusy(() -> {
+            GetResponse getResponse = highLevelClient().get(request, RequestOptions.DEFAULT);
+            assertTrue(getResponse.isExists());
+            assertTrue(getResponse.getSourceAsString().contains("finished"));
+        }, 30, TimeUnit.SECONDS);
+    }
+
+    private boolean forecastExists(String jobId, String forecastId) throws Exception {
+        GetRequest getRequest = new GetRequest(".ml-anomalies-" + jobId);
+        getRequest.id(jobId + "_model_forecast_request_stats_" + forecastId);
+        GetResponse getResponse = highLevelClient().get(getRequest, RequestOptions.DEFAULT);
+        return getResponse.isExists();
+    }
+
    public static String randomValidJobId() {
CodepointSetGenerator generator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz0123456789".toCharArray()); return generator.ofCodePointsLength(random(), 10, 10); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java index 9abef54d0d24f..3e43792ac6a5a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java @@ -21,8 +21,11 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.MachineLearningGetResultsIT; import org.elasticsearch.client.MachineLearningIT; @@ -31,6 +34,7 @@ import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.client.ml.CloseJobRequest; import org.elasticsearch.client.ml.CloseJobResponse; +import org.elasticsearch.client.ml.DeleteForecastRequest; import org.elasticsearch.client.ml.DeleteJobRequest; import org.elasticsearch.client.ml.DeleteJobResponse; import org.elasticsearch.client.ml.FlushJobRequest; @@ -39,6 +43,8 @@ import org.elasticsearch.client.ml.ForecastJobResponse; import org.elasticsearch.client.ml.GetBucketsRequest; import org.elasticsearch.client.ml.GetBucketsResponse; +import org.elasticsearch.client.ml.GetCategoriesRequest; +import org.elasticsearch.client.ml.GetCategoriesResponse; import org.elasticsearch.client.ml.GetInfluencersRequest; import org.elasticsearch.client.ml.GetInfluencersResponse; import org.elasticsearch.client.ml.GetJobRequest; @@ -53,28 +59,36 @@ import org.elasticsearch.client.ml.OpenJobResponse; import org.elasticsearch.client.ml.PostDataRequest; import org.elasticsearch.client.ml.PostDataResponse; +import org.elasticsearch.client.ml.PutDatafeedRequest; +import org.elasticsearch.client.ml.PutDatafeedResponse; import org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.client.ml.PutJobResponse; import org.elasticsearch.client.ml.UpdateJobRequest; +import org.elasticsearch.client.ml.datafeed.ChunkingConfig; +import org.elasticsearch.client.ml.datafeed.DatafeedConfig; import org.elasticsearch.client.ml.job.config.AnalysisConfig; import org.elasticsearch.client.ml.job.config.AnalysisLimits; import org.elasticsearch.client.ml.job.config.DataDescription; import org.elasticsearch.client.ml.job.config.DetectionRule; import org.elasticsearch.client.ml.job.config.Detector; import org.elasticsearch.client.ml.job.config.Job; -import org.elasticsearch.client.ml.job.process.DataCounts; import org.elasticsearch.client.ml.job.config.JobUpdate; import org.elasticsearch.client.ml.job.config.ModelPlotConfig; import org.elasticsearch.client.ml.job.config.Operator; import org.elasticsearch.client.ml.job.config.RuleCondition; +import org.elasticsearch.client.ml.job.process.DataCounts; import org.elasticsearch.client.ml.job.results.AnomalyRecord; import org.elasticsearch.client.ml.job.results.Bucket; +import 
org.elasticsearch.client.ml.job.results.CategoryDefinition;
import org.elasticsearch.client.ml.job.results.Influencer;
import org.elasticsearch.client.ml.job.results.OverallBucket;
import org.elasticsearch.client.ml.job.stats.JobStats;
import org.elasticsearch.client.ml.job.util.PageParams;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.aggregations.AggregatorFactories;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.junit.After;

import java.io.IOException;
@@ -90,6 +104,7 @@
import static org.hamcrest.Matchers.closeTo;
import static org.hamcrest.Matchers.containsInAnyOrder;
+import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.core.Is.is;
@@ -182,8 +197,6 @@ public void onFailure(Exception e) {
    public void testGetJob() throws Exception {
        RestHighLevelClient client = highLevelClient();

-        String jobId = "get-machine-learning-job1";
-
        Job job = MachineLearningIT.buildJob("get-machine-learning-job1");
        client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
@@ -473,7 +486,107 @@ public void onFailure(Exception e) {
            assertTrue(latch.await(30L, TimeUnit.SECONDS));
        }
    }
-
+
+    public void testPutDatafeed() throws Exception {
+        RestHighLevelClient client = highLevelClient();
+
+        {
+            // We need to create a job for the datafeed request to be valid
+            String jobId = "put-datafeed-job-1";
+            Job job = MachineLearningIT.buildJob(jobId);
+            client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
+
+            String id = "datafeed-1";
+
+            //tag::x-pack-ml-create-datafeed-config
+            DatafeedConfig.Builder datafeedBuilder = new DatafeedConfig.Builder(id, jobId) // <1>
+                    .setIndices("index_1", "index_2"); // <2>
+            //end::x-pack-ml-create-datafeed-config
+
+            AggregatorFactories.Builder aggs = AggregatorFactories.builder();
+
+            //tag::x-pack-ml-create-datafeed-config-set-aggregations
+            datafeedBuilder.setAggregations(aggs); // <1>
+            //end::x-pack-ml-create-datafeed-config-set-aggregations
+
+            // Clearing aggregation to avoid complex validation rules
+            datafeedBuilder.setAggregations((String) null);
+
+            //tag::x-pack-ml-create-datafeed-config-set-chunking-config
+            datafeedBuilder.setChunkingConfig(ChunkingConfig.newAuto()); // <1>
+            //end::x-pack-ml-create-datafeed-config-set-chunking-config
+
+            //tag::x-pack-ml-create-datafeed-config-set-frequency
+            datafeedBuilder.setFrequency(TimeValue.timeValueSeconds(30)); // <1>
+            //end::x-pack-ml-create-datafeed-config-set-frequency
+
+            //tag::x-pack-ml-create-datafeed-config-set-query
+            datafeedBuilder.setQuery(QueryBuilders.matchAllQuery()); // <1>
+            //end::x-pack-ml-create-datafeed-config-set-query
+
+            //tag::x-pack-ml-create-datafeed-config-set-query-delay
+            datafeedBuilder.setQueryDelay(TimeValue.timeValueMinutes(1)); // <1>
+            //end::x-pack-ml-create-datafeed-config-set-query-delay
+
+            List<SearchSourceBuilder.ScriptField> scriptFields = Collections.emptyList();
+            //tag::x-pack-ml-create-datafeed-config-set-script-fields
+            datafeedBuilder.setScriptFields(scriptFields); // <1>
+            //end::x-pack-ml-create-datafeed-config-set-script-fields
+
+            //tag::x-pack-ml-create-datafeed-config-set-scroll-size
+            datafeedBuilder.setScrollSize(1000); // <1>
+            //end::x-pack-ml-create-datafeed-config-set-scroll-size
+
+            //tag::x-pack-ml-put-datafeed-request
+            PutDatafeedRequest request = new
+                    PutDatafeedRequest(datafeedBuilder.build()); // <1>
+            //end::x-pack-ml-put-datafeed-request
+
+            //tag::x-pack-ml-put-datafeed-execute
+            PutDatafeedResponse response = client.machineLearning().putDatafeed(request, RequestOptions.DEFAULT);
+            //end::x-pack-ml-put-datafeed-execute
+
+            //tag::x-pack-ml-put-datafeed-response
+            DatafeedConfig datafeed = response.getResponse(); // <1>
+            //end::x-pack-ml-put-datafeed-response
+            assertThat(datafeed.getId(), equalTo("datafeed-1"));
+        }
+        {
+            // We need to create a job for the datafeed request to be valid
+            String jobId = "put-datafeed-job-2";
+            Job job = MachineLearningIT.buildJob(jobId);
+            client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
+
+            String id = "datafeed-2";
+
+            DatafeedConfig datafeed = new DatafeedConfig.Builder(id, jobId).setIndices("index_1", "index_2").build();
+
+            PutDatafeedRequest request = new PutDatafeedRequest(datafeed);
+            // tag::x-pack-ml-put-datafeed-execute-listener
+            ActionListener<PutDatafeedResponse> listener = new ActionListener<PutDatafeedResponse>() {
+                @Override
+                public void onResponse(PutDatafeedResponse response) {
+                    // <1>
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    // <2>
+                }
+            };
+            // end::x-pack-ml-put-datafeed-execute-listener
+
+            // Replace the empty listener by a blocking listener in test
+            final CountDownLatch latch = new CountDownLatch(1);
+            listener = new LatchedActionListener<>(listener, latch);
+
+            // tag::x-pack-ml-put-datafeed-execute-async
+            client.machineLearning().putDatafeedAsync(request, RequestOptions.DEFAULT, listener); // <1>
+            // end::x-pack-ml-put-datafeed-execute-async
+
+            assertTrue(latch.await(30L, TimeUnit.SECONDS));
+        }
+    }
+
    public void testGetBuckets() throws IOException, InterruptedException {
        RestHighLevelClient client = highLevelClient();
@@ -636,8 +749,85 @@ public void onFailure(Exception e) {
            assertTrue(latch.await(30L, TimeUnit.SECONDS));
        }
    }
+
+    public void testDeleteForecast() throws Exception {
+        RestHighLevelClient client = highLevelClient();
+
+        Job job = MachineLearningIT.buildJob("deleting-forecast-for-job");
+        client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
+        client.machineLearning().openJob(new OpenJobRequest(job.getId()), RequestOptions.DEFAULT);
+        PostDataRequest.JsonBuilder builder = new PostDataRequest.JsonBuilder();
+        for(int i = 0; i < 30; i++) {
+            Map<String, Object> hashMap = new HashMap<>();
+            hashMap.put("total", randomInt(1000));
+            hashMap.put("timestamp", (i+1)*1000);
+            builder.addDoc(hashMap);
+        }
+        PostDataRequest postDataRequest = new PostDataRequest(job.getId(), builder);
+        client.machineLearning().postData(postDataRequest, RequestOptions.DEFAULT);
+        client.machineLearning().flushJob(new FlushJobRequest(job.getId()), RequestOptions.DEFAULT);
+        ForecastJobResponse forecastJobResponse = client.machineLearning().
+                forecastJob(new ForecastJobRequest(job.getId()), RequestOptions.DEFAULT);
+        String forecastId = forecastJobResponse.getForecastId();
+
+        GetRequest request = new GetRequest(".ml-anomalies-" + job.getId());
+        request.id(job.getId() + "_model_forecast_request_stats_" + forecastId);
+        assertBusy(() -> {
+            GetResponse getResponse = highLevelClient().get(request, RequestOptions.DEFAULT);
+            assertTrue(getResponse.isExists());
+            assertTrue(getResponse.getSourceAsString().contains("finished"));
+        }, 30, TimeUnit.SECONDS);
+        {
+            //tag::x-pack-ml-delete-forecast-request
+            DeleteForecastRequest deleteForecastRequest = new DeleteForecastRequest("deleting-forecast-for-job"); //<1>
+            //end::x-pack-ml-delete-forecast-request
+
+            //tag::x-pack-ml-delete-forecast-request-options
+            deleteForecastRequest.setForecastIds(forecastId); //<1>
+            deleteForecastRequest.timeout("30s"); //<2>
+            deleteForecastRequest.setAllowNoForecasts(true); //<3>
+            //end::x-pack-ml-delete-forecast-request-options
+
+            //tag::x-pack-ml-delete-forecast-execute
+            AcknowledgedResponse deleteForecastResponse = client.machineLearning().deleteForecast(deleteForecastRequest,
+                RequestOptions.DEFAULT);
+            //end::x-pack-ml-delete-forecast-execute
+
+            //tag::x-pack-ml-delete-forecast-response
+            boolean isAcknowledged = deleteForecastResponse.isAcknowledged(); //<1>
+            //end::x-pack-ml-delete-forecast-response
+        }
+        {
+            //tag::x-pack-ml-delete-forecast-listener
+            ActionListener<AcknowledgedResponse> listener = new ActionListener<AcknowledgedResponse>() {
+                @Override
+                public void onResponse(AcknowledgedResponse deleteForecastResponse) {
+                    //<1>
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    // <2>
+                }
+            };
+            //end::x-pack-ml-delete-forecast-listener
+            DeleteForecastRequest deleteForecastRequest = DeleteForecastRequest.deleteAllForecasts(job.getId());
+            deleteForecastRequest.setAllowNoForecasts(true);
+
+            // Replace the empty listener by a blocking listener in test
+            final CountDownLatch latch = new CountDownLatch(1);
+            listener = new LatchedActionListener<>(listener, latch);
+
+            // tag::x-pack-ml-delete-forecast-execute-async
+            client.machineLearning().deleteForecastAsync(deleteForecastRequest, RequestOptions.DEFAULT, listener); //<1>
+            // end::x-pack-ml-delete-forecast-execute-async
+
+            assertTrue(latch.await(30L, TimeUnit.SECONDS));
+        }
+    }
+
    public void testGetJobStats() throws Exception {
        RestHighLevelClient client = highLevelClient();
@@ -1111,4 +1301,74 @@ public void onFailure(Exception e) {
        assertTrue(latch.await(30L, TimeUnit.SECONDS));
    }
}
+
+    public void testGetCategories() throws IOException, InterruptedException {
+        RestHighLevelClient client = highLevelClient();
+
+        String jobId = "test-get-categories";
+        Job job = MachineLearningIT.buildJob(jobId);
+        client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
+
+        // Let us index a category
+        IndexRequest indexRequest = new IndexRequest(".ml-anomalies-shared", "doc");
+        indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
+        indexRequest.source("{\"job_id\": \"test-get-categories\", \"category_id\": 1, \"terms\": \"AAL\"," +
+            " \"regex\": \".*?AAL.*\", \"max_matching_length\": 3, \"examples\": [\"AAL\"]}", XContentType.JSON);
+        client.index(indexRequest, RequestOptions.DEFAULT);
+
+        {
+            // tag::x-pack-ml-get-categories-request
+            GetCategoriesRequest request = new GetCategoriesRequest(jobId); // <1>
+            // end::x-pack-ml-get-categories-request
+
+            // tag::x-pack-ml-get-categories-category-id
+            request.setCategoryId(1L); // <1>
+            // end::x-pack-ml-get-categories-category-id
+
+            // tag::x-pack-ml-get-categories-page
+            request.setPageParams(new PageParams(100, 200)); // <1>
+            // end::x-pack-ml-get-categories-page
+
+            // Set page params back to null so the response contains the category we indexed
+            request.setPageParams(null);
+
+            // tag::x-pack-ml-get-categories-execute
+            GetCategoriesResponse response = client.machineLearning().getCategories(request, RequestOptions.DEFAULT);
+            // end::x-pack-ml-get-categories-execute
+
+            // tag::x-pack-ml-get-categories-response
+            long count = response.count(); // <1>
+            List<CategoryDefinition> categories = response.categories(); // <2>
+            // end::x-pack-ml-get-categories-response
+            assertEquals(1, categories.size());
+        }
+        {
+            GetCategoriesRequest request = new GetCategoriesRequest(jobId);
+
+            // tag::x-pack-ml-get-categories-listener
+            ActionListener<GetCategoriesResponse> listener =
+                new ActionListener<GetCategoriesResponse>() {
+                    @Override
+                    public void onResponse(GetCategoriesResponse getCategoriesResponse) {
+                        // <1>
+                    }
+
+                    @Override
+                    public void onFailure(Exception e) {
+                        // <2>
+                    }
+                };
+            // end::x-pack-ml-get-categories-listener
+
+            // Replace the empty listener by a blocking listener in test
+            final CountDownLatch latch = new CountDownLatch(1);
+            listener = new LatchedActionListener<>(listener, latch);
+
+            // tag::x-pack-ml-get-categories-execute-async
+            client.machineLearning().getCategoriesAsync(request, RequestOptions.DEFAULT, listener); // <1>
+            // end::x-pack-ml-get-categories-execute-async
+
+            assertTrue(latch.await(30L, TimeUnit.SECONDS));
+        }
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteForecastRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteForecastRequestTests.java
new file mode 100644
index 0000000000000..ad01227771185
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteForecastRequestTests.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.client.ml.job.config.JobTests;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class DeleteForecastRequestTests extends AbstractXContentTestCase<DeleteForecastRequest> {
+
+    @Override
+    protected DeleteForecastRequest createTestInstance() {
+
+        DeleteForecastRequest deleteForecastRequest = new DeleteForecastRequest(JobTests.randomValidJobId());
+        if (randomBoolean()) {
+            int length = randomInt(10);
+            List<String> ids = new ArrayList<>(length);
+            for(int i = 0; i < length; i++) {
+                ids.add(randomAlphaOfLength(10));
+            }
+            deleteForecastRequest.setForecastIds(ids);
+        }
+        if (randomBoolean()) {
+            deleteForecastRequest.setAllowNoForecasts(randomBoolean());
+        }
+        if (randomBoolean()) {
+            deleteForecastRequest.timeout(randomTimeValue());
+        }
+        return deleteForecastRequest;
+    }
+
+    @Override
+    protected DeleteForecastRequest doParseInstance(XContentParser parser) throws IOException {
+        return DeleteForecastRequest.PARSER.apply(parser, null);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return false;
+    }
+
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCategoriesRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCategoriesRequestTests.java
new file mode 100644
index 0000000000000..7d9fe2b238f75
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCategoriesRequestTests.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.client.ml.job.util.PageParams;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+
+public class GetCategoriesRequestTests extends AbstractXContentTestCase<GetCategoriesRequest> {
+
+    @Override
+    protected GetCategoriesRequest createTestInstance() {
+        GetCategoriesRequest request = new GetCategoriesRequest(randomAlphaOfLengthBetween(1, 20));
+        if (randomBoolean()) {
+            request.setCategoryId(randomNonNegativeLong());
+        } else {
+            int from = randomInt(10000);
+            int size = randomInt(10000);
+            request.setPageParams(new PageParams(from, size));
+        }
+        return request;
+    }
+
+    @Override
+    protected GetCategoriesRequest doParseInstance(XContentParser parser) throws IOException {
+        return GetCategoriesRequest.PARSER.apply(parser, null);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return false;
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCategoriesResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCategoriesResponseTests.java
new file mode 100644
index 0000000000000..e8718ba20e9ce
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCategoriesResponseTests.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.client.ml.job.results.CategoryDefinition;
+import org.elasticsearch.client.ml.job.results.CategoryDefinitionTests;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class GetCategoriesResponseTests extends AbstractXContentTestCase<GetCategoriesResponse> {
+
+    @Override
+    protected GetCategoriesResponse createTestInstance() {
+        String jobId = randomAlphaOfLength(20);
+        int listSize = randomInt(10);
+        List<CategoryDefinition> categories = new ArrayList<>(listSize);
+        for (int j = 0; j < listSize; j++) {
+            CategoryDefinition category = CategoryDefinitionTests.createTestInstance(jobId);
+            categories.add(category);
+        }
+        return new GetCategoriesResponse(categories, listSize);
+    }
+
+    @Override
+    protected GetCategoriesResponse doParseInstance(XContentParser parser) throws IOException {
+        return GetCategoriesResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutDatafeedRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutDatafeedRequestTests.java
new file mode 100644
index 0000000000000..5af30d3257479
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutDatafeedRequestTests.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.client.ml.datafeed.DatafeedConfig;
+import org.elasticsearch.client.ml.datafeed.DatafeedConfigTests;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+
+public class PutDatafeedRequestTests extends AbstractXContentTestCase<PutDatafeedRequest> {
+
+    @Override
+    protected PutDatafeedRequest createTestInstance() {
+        return new PutDatafeedRequest(DatafeedConfigTests.createRandom());
+    }
+
+    @Override
+    protected PutDatafeedRequest doParseInstance(XContentParser parser) {
+        return new PutDatafeedRequest(DatafeedConfig.PARSER.apply(parser, null).build());
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return false;
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutDatafeedResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutDatafeedResponseTests.java
new file mode 100644
index 0000000000000..5b2428167b9ea
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutDatafeedResponseTests.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.client.ml.datafeed.DatafeedConfigTests;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.function.Predicate;
+
+public class PutDatafeedResponseTests extends AbstractXContentTestCase<PutDatafeedResponse> {
+
+    @Override
+    protected PutDatafeedResponse createTestInstance() {
+        return new PutDatafeedResponse(DatafeedConfigTests.createRandom());
+    }
+
+    @Override
+    protected PutDatafeedResponse doParseInstance(XContentParser parser) throws IOException {
+        return PutDatafeedResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+
+    @Override
+    protected Predicate<String> getRandomFieldsExcludeFilter() {
+        return field -> !field.isEmpty();
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java
index 8ed51415521af..3a7910ad73281 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java
@@ -19,7 +19,6 @@
package org.elasticsearch.client.ml.datafeed;

import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator;
-import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
@@ -27,7 +26,6 @@
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilders;
-import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder;
@@ -36,19 +34,26 @@

import java.io.IOException;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.List;

public class DatafeedConfigTests extends AbstractXContentTestCase<DatafeedConfig> {

    @Override
    protected DatafeedConfig createTestInstance() {
+        return createRandom();
+    }
+
+    public static DatafeedConfig createRandom() {
        long bucketSpanMillis = 3600000;
        DatafeedConfig.Builder builder = constructBuilder();
        builder.setIndices(randomStringList(1, 10));
        builder.setTypes(randomStringList(0, 10));
        if (randomBoolean()) {
-            builder.setQuery(QueryBuilders.termQuery(randomAlphaOfLength(10), randomAlphaOfLength(10)));
+            try {
+                builder.setQuery(QueryBuilders.termQuery(randomAlphaOfLength(10),
+                    randomAlphaOfLength(10)));
+            } catch (IOException e) {
+                throw new RuntimeException("Failed to serialize query", e);
+            }
        }
        boolean addScriptFields = randomBoolean();
        if (addScriptFields) {
@@ -72,7 +77,11 @@ protected DatafeedConfig createTestInstance() {
            MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time");
            aggs.addAggregator(AggregationBuilders.dateHistogram("buckets")
                .interval(aggHistogramInterval).subAggregation(maxTime).field("time"));
-            builder.setAggregations(aggs);
+            try {
+                builder.setAggregations(aggs);
+            } catch (IOException e) {
+                throw new RuntimeException("Failed to serialize aggs", e);
+            }
        }
        if (randomBoolean()) {
            builder.setScrollSize(randomIntBetween(0, Integer.MAX_VALUE));
@@ -93,12 +102,6 @@ protected DatafeedConfig createTestInstance() {
        return builder.build();
    }

-    @Override
-    protected NamedXContentRegistry xContentRegistry() {
-        SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList());
-        return new NamedXContentRegistry(searchModule.getNamedXContents());
-    }
-
    public static List<String> randomStringList(int min, int max) {
        int size = scaledRandomIntBetween(min, max);
        List<String> list = new ArrayList<>();
@@ -150,21 +153,6 @@ public void testCheckValid_GivenNullJobId() {
        expectThrows(NullPointerException.class, () -> new DatafeedConfig.Builder(randomValidDatafeedId(), null));
    }

-    public void testCheckValid_GivenNullIndices() {
-        DatafeedConfig.Builder conf = constructBuilder();
-        expectThrows(NullPointerException.class, () -> conf.setIndices(null));
-    }
-
-    public void testCheckValid_GivenNullType() {
-        DatafeedConfig.Builder conf = constructBuilder();
-        expectThrows(NullPointerException.class, () -> conf.setTypes(null));
-    }
-
-    public void testCheckValid_GivenNullQuery() {
-        DatafeedConfig.Builder conf = constructBuilder();
-        expectThrows(NullPointerException.class, () -> conf.setQuery(null));
-    }
-
    public static String randomValidDatafeedId() {
        CodepointSetGenerator generator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz".toCharArray());
        return generator.ofCodePointsLength(random(), 10, 10);
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdateTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdateTests.java
index 3dddad3c01676..1c3723fd0a631 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdateTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdateTests.java
@@ -18,19 +18,16 @@
 */
package org.elasticsearch.client.ml.datafeed;

-import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryBuilders;
-import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.test.AbstractXContentTestCase;

+import java.io.IOException;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.List;

public class DatafeedUpdateTests extends AbstractXContentTestCase<DatafeedUpdate> {
@@ -54,7 +51,11 @@ protected DatafeedUpdate createTestInstance() {
            builder.setTypes(DatafeedConfigTests.randomStringList(1, 10));
        }
        if (randomBoolean()) {
-
builder.setQuery(QueryBuilders.termQuery(randomAlphaOfLength(10), randomAlphaOfLength(10))); + try { + builder.setQuery(QueryBuilders.termQuery(randomAlphaOfLength(10), randomAlphaOfLength(10))); + } catch (IOException e) { + throw new RuntimeException("Failed to serialize query", e); + } } if (randomBoolean()) { int scriptsSize = randomInt(3); @@ -71,7 +72,11 @@ protected DatafeedUpdate createTestInstance() { // Testing with a single agg is ok as we don't have special list xcontent logic AggregatorFactories.Builder aggs = new AggregatorFactories.Builder(); aggs.addAggregator(AggregationBuilders.avg(randomAlphaOfLength(10)).field(randomAlphaOfLength(10))); - builder.setAggregations(aggs); + try { + builder.setAggregations(aggs); + } catch (IOException e) { + throw new RuntimeException("Failed to serialize aggs", e); + } } if (randomBoolean()) { builder.setScrollSize(randomIntBetween(0, Integer.MAX_VALUE)); @@ -91,11 +96,4 @@ protected DatafeedUpdate doParseInstance(XContentParser parser) { protected boolean supportsUnknownFields() { return false; } - - @Override - protected NamedXContentRegistry xContentRegistry() { - SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); - return new NamedXContentRegistry(searchModule.getNamedXContents()); - } - } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/CategoryDefinitionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/CategoryDefinitionTests.java index 27e15a1600d38..63f261583869f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/CategoryDefinitionTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/results/CategoryDefinitionTests.java @@ -25,7 +25,7 @@ public class CategoryDefinitionTests extends AbstractXContentTestCase { - public CategoryDefinition createTestInstance(String jobId) { + public static CategoryDefinition createTestInstance(String jobId) { CategoryDefinition categoryDefinition = new CategoryDefinition(jobId); categoryDefinition.setCategoryId(randomLong()); categoryDefinition.setTerms(randomAlphaOfLength(10)); diff --git a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeNameInLogsIT.java b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeNameInLogsIT.java index 13128b9478e0c..2d57644f9a727 100644 --- a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeNameInLogsIT.java +++ b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeNameInLogsIT.java @@ -17,7 +17,7 @@ * under the License. 
 */
-package org.elasticsearch.unconfigurednodename;
+package org.elasticsearch.test.rest;

import org.elasticsearch.common.logging.NodeNameInLogsIntegTestCase;
diff --git a/docs/build.gradle b/docs/build.gradle
index c6a7a8d48374b..f2a7f8511e349 100644
--- a/docs/build.gradle
+++ b/docs/build.gradle
@@ -57,6 +57,8 @@ integTestCluster {
  // TODO: remove this for 7.0, this exists to allow the doc examples in 6.x to continue using the defaults
  systemProperty 'es.scripting.use_java_time', 'false'
  systemProperty 'es.scripting.update.ctx_in_params', 'false'
+  //TODO: remove this once the cname is prepended to the address by default in 7.0
+  systemProperty 'es.http.cname_in_publish_address', 'true'
}

// remove when https://github.com/elastic/elasticsearch/issues/31305 is fixed
diff --git a/docs/java-rest/high-level/ml/delete-forecast.asciidoc b/docs/java-rest/high-level/ml/delete-forecast.asciidoc
new file mode 100644
index 0000000000000..09aa5c734ff1e
--- /dev/null
+++ b/docs/java-rest/high-level/ml/delete-forecast.asciidoc
@@ -0,0 +1,78 @@
+[[java-rest-high-x-pack-ml-delete-forecast]]
+=== Delete Forecast API
+
+The Delete Forecast API provides the ability to delete one or more
+forecasts of a {ml} job in the cluster.
+It accepts a `DeleteForecastRequest` object and responds
+with an `AcknowledgedResponse` object.
+
+[[java-rest-high-x-pack-ml-delete-forecast-request]]
+==== Delete Forecast Request
+
+A `DeleteForecastRequest` object is created with an existing, non-null `jobId`.
+All other fields of the request are optional.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-forecast-request]
+--------------------------------------------------
+<1> Constructing a new request referencing an existing `jobId`
+
+==== Optional Arguments
+
+The following arguments are optional:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-forecast-request-options]
+--------------------------------------------------
+<1> Sets the specific forecast IDs to delete; can be set to `_all` to indicate all forecasts for the given
+`jobId`
+<2> Sets the timeout for the request to respond; defaults to 30 seconds
+<3> Sets the `allow_no_forecasts` option. When `true`, no error is returned if an `_all`
+request finds no forecasts. It defaults to `true`
+
+[[java-rest-high-x-pack-ml-delete-forecast-execution]]
+==== Execution
+
+The request can be executed through the `MachineLearningClient` contained
+in the `RestHighLevelClient` object, accessed via the `machineLearning()` method.
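+
+For orientation, the following is a minimal sketch of the whole flow. The job ID,
+forecast ID and `client` instance are hypothetical placeholders; the official
+snippets below are extracted from the documentation tests.
+
+["source","java"]
+--------------------------------------------------
+DeleteForecastRequest request = new DeleteForecastRequest("my-job"); // hypothetical job ID
+request.setForecastIds("forecast-1");                                // hypothetical forecast ID
+AcknowledgedResponse response =
+    client.machineLearning().deleteForecast(request, RequestOptions.DEFAULT);
+boolean acknowledged = response.isAcknowledged();
+--------------------------------------------------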
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-forecast-execute]
+--------------------------------------------------
+
+[[java-rest-high-x-pack-ml-delete-forecast-execution-async]]
+==== Asynchronous Execution
+
+The request can also be executed asynchronously:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-forecast-execute-async]
+--------------------------------------------------
+<1> The `DeleteForecastRequest` to execute and the `ActionListener` to use when
+the execution completes
+
+The method does not block and returns immediately. The passed `ActionListener` is used
+to notify the caller of completion. A typical `ActionListener` for `AcknowledgedResponse` may
+look like:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-forecast-listener]
+--------------------------------------------------
+<1> `onResponse` is called back when the action is completed successfully
+<2> `onFailure` is called back when some unexpected error occurs
+
+[[java-rest-high-x-pack-ml-delete-forecast-response]]
+==== Delete Forecast Response
+
+An `AcknowledgedResponse` contains an acknowledgement of the deletion of the forecast(s):
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-forecast-response]
+--------------------------------------------------
+<1> `isAcknowledged()` indicates whether the forecast(s) were successfully deleted
diff --git a/docs/java-rest/high-level/ml/get-categories.asciidoc b/docs/java-rest/high-level/ml/get-categories.asciidoc
new file mode 100644
index 0000000000000..0e86a2b7f33a6
--- /dev/null
+++ b/docs/java-rest/high-level/ml/get-categories.asciidoc
@@ -0,0 +1,83 @@
+[[java-rest-high-x-pack-ml-get-categories]]
+=== Get Categories API
+
+The Get Categories API retrieves one or more category results.
+It accepts a `GetCategoriesRequest` object and responds
+with a `GetCategoriesResponse` object.
+
+[[java-rest-high-x-pack-ml-get-categories-request]]
+==== Get Categories Request
+
+A `GetCategoriesRequest` object is created with an existing, non-null `jobId`.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-request]
+--------------------------------------------------
+<1> Constructing a new request referencing an existing `jobId`
+
+==== Optional Arguments
+The following arguments are optional:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-category-id]
+--------------------------------------------------
+<1> The ID of the category to get. If not set, all categories are returned.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-page]
+--------------------------------------------------
+<1> The page parameters `from` and `size`.
+`from` specifies the number of categories to skip.
+`size` specifies the maximum number of categories to get. Defaults to `0` and `100` respectively.
+
+[[java-rest-high-x-pack-ml-get-categories-execution]]
+==== Execution
+
+The request can be executed through the `MachineLearningClient` contained
+in the `RestHighLevelClient` object, accessed via the `machineLearning()` method.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-execute]
+--------------------------------------------------
+
+
+[[java-rest-high-x-pack-ml-get-categories-execution-async]]
+==== Asynchronous Execution
+
+The request can also be executed asynchronously:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-execute-async]
+--------------------------------------------------
+<1> The `GetCategoriesRequest` to execute and the `ActionListener` to use when
+the execution completes
+
+The asynchronous method does not block and returns immediately. Once it has
+completed, the `ActionListener` is called back with the `onResponse` method
+if the execution is successful or the `onFailure` method if the execution
+failed.
+
+A typical listener for `GetCategoriesResponse` looks like:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-listener]
+--------------------------------------------------
+<1> `onResponse` is called back when the action is completed successfully
+<2> `onFailure` is called back when some unexpected error occurs
+
+[[java-rest-high-x-pack-ml-get-categories-response]]
+==== Get Categories Response
+
+The returned `GetCategoriesResponse` contains the requested categories:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-response]
+--------------------------------------------------
+<1> The count of categories that were matched
+<2> The categories retrieved
\ No newline at end of file
diff --git a/docs/java-rest/high-level/ml/put-datafeed.asciidoc b/docs/java-rest/high-level/ml/put-datafeed.asciidoc
new file mode 100644
index 0000000000000..86c9d631726db
--- /dev/null
+++ b/docs/java-rest/high-level/ml/put-datafeed.asciidoc
@@ -0,0 +1,124 @@
+[[java-rest-high-x-pack-ml-put-datafeed]]
+=== Put Datafeed API
+
+The Put Datafeed API can be used to create a new {ml} datafeed
+in the cluster. The API accepts a `PutDatafeedRequest` object
+as a request and returns a `PutDatafeedResponse`.
+
+[[java-rest-high-x-pack-ml-put-datafeed-request]]
+==== Put Datafeed Request
+
+A `PutDatafeedRequest` requires the following argument:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-put-datafeed-request]
+--------------------------------------------------
+<1> The configuration of the {ml} datafeed to create
+
+[[java-rest-high-x-pack-ml-put-datafeed-config]]
+==== Datafeed Configuration
+
+The `DatafeedConfig` object contains all the details about the {ml} datafeed
+ +A `DatafeedConfig` requires the following arguments: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config] +-------------------------------------------------- +<1> The datafeed ID and the job ID +<2> The indices that contain the data to retrieve and feed into the job + +==== Optional Arguments +The following arguments are optional: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config-set-chunking-config] +-------------------------------------------------- +<1> Specifies how data searches are split into time chunks. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config-set-frequency] +-------------------------------------------------- +<1> The interval at which scheduled queries are made while the datafeed runs in real time. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config-set-query] +-------------------------------------------------- +<1> A query to filter the search results by. Defaults to the `match_all` query. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config-set-query-delay] +-------------------------------------------------- +<1> The time interval behind real time at which data is queried. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config-set-script-fields] +-------------------------------------------------- +<1> Allows the use of script fields. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config-set-scroll-size] +-------------------------------------------------- +<1> The `size` parameter used in the searches. + +[[java-rest-high-x-pack-ml-put-datafeed-execution]] +==== Execution + +The Put Datafeed API can be executed through a `MachineLearningClient` +instance. Such an instance can be retrieved from a `RestHighLevelClient` +using the `machineLearning()` method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-put-datafeed-execute] +-------------------------------------------------- + +[[java-rest-high-x-pack-ml-put-datafeed-response]] +==== Response + +The returned `PutDatafeedResponse` contains the full representation of +the new {ml} datafeed if it has been successfully created.
This will +contain the creation time and other fields initialized using +default values: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-put-datafeed-response] +-------------------------------------------------- +<1> The created datafeed + +[[java-rest-high-x-pack-ml-put-datafeed-async]] +==== Asynchronous Execution + +This request can be executed asynchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-put-datafeed-execute-async] +-------------------------------------------------- +<1> The `PutDatafeedRequest` to execute and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `PutDatafeedResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-put-datafeed-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure. The raised exception is provided as an argument diff --git a/docs/java-rest/high-level/ml/put-job.asciidoc b/docs/java-rest/high-level/ml/put-job.asciidoc index d51bb63d4054d..8c726d63b16d8 100644 --- a/docs/java-rest/high-level/ml/put-job.asciidoc +++ b/docs/java-rest/high-level/ml/put-job.asciidoc @@ -142,7 +142,7 @@ This request can be executed asynchronously: -------------------------------------------------- include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-put-job-execute-async] -------------------------------------------------- -<1> The `PutMlJobRequest` to execute and the `ActionListener` to use when +<1> The `PutJobRequest` to execute and the `ActionListener` to use when the execution completes The asynchronous method does not block and returns immediately. 
Once it is diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 8d92653ce5702..0be681a14d1fc 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -220,12 +220,15 @@ The Java High Level REST Client supports the following Machine Learning APIs: * <> * <> * <> +* <> * <> +* <> * <> * <> * <> * <> * <> +* <> include::ml/put-job.asciidoc[] include::ml/get-job.asciidoc[] @@ -234,13 +237,16 @@ include::ml/open-job.asciidoc[] include::ml/close-job.asciidoc[] include::ml/update-job.asciidoc[] include::ml/flush-job.asciidoc[] +include::ml/put-datafeed.asciidoc[] include::ml/get-job-stats.asciidoc[] include::ml/forecast-job.asciidoc[] +include::ml/delete-forecast.asciidoc[] include::ml/get-buckets.asciidoc[] include::ml/get-overall-buckets.asciidoc[] include::ml/get-records.asciidoc[] include::ml/post-data.asciidoc[] include::ml/get-influencers.asciidoc[] +include::ml/get-categories.asciidoc[] == Migration APIs diff --git a/docs/reference/analysis/tokenfilters.asciidoc b/docs/reference/analysis/tokenfilters.asciidoc index f531bc5d0e9e3..41bb9d38afb5d 100644 --- a/docs/reference/analysis/tokenfilters.asciidoc +++ b/docs/reference/analysis/tokenfilters.asciidoc @@ -37,6 +37,8 @@ include::tokenfilters/multiplexer-tokenfilter.asciidoc[] include::tokenfilters/condition-tokenfilter.asciidoc[] +include::tokenfilters/predicate-tokenfilter.asciidoc[] + include::tokenfilters/stemmer-tokenfilter.asciidoc[] include::tokenfilters/stemmer-override-tokenfilter.asciidoc[] diff --git a/docs/reference/analysis/tokenfilters/predicate-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/predicate-tokenfilter.asciidoc new file mode 100644 index 0000000000000..bebf7bd80f250 --- /dev/null +++ b/docs/reference/analysis/tokenfilters/predicate-tokenfilter.asciidoc @@ -0,0 +1,79 @@ +[[analysis-predicatefilter-tokenfilter]] +=== Predicate Token Filter Script + +The predicate_token_filter token filter takes a predicate script, and removes tokens that do +not match the predicate. + +[float] +=== Options +[horizontal] +script:: a predicate script that determines whether or not the current token will +be emitted. Note that only inline scripts are supported. 
+ +[float] +=== Settings example + +You can set it up like: + +[source,js] +-------------------------------------------------- +PUT /condition_example +{ + "settings" : { + "analysis" : { + "analyzer" : { + "my_analyzer" : { + "tokenizer" : "standard", + "filter" : [ "my_script_filter" ] + } + }, + "filter" : { + "my_script_filter" : { + "type" : "predicate_token_filter", + "script" : { + "source" : "token.getTerm().length() > 5" <1> + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + +<1> This will emit tokens that are more than 5 characters long + +And test it like: + +[source,js] +-------------------------------------------------- +POST /condition_example/_analyze +{ + "analyzer" : "my_analyzer", + "text" : "What Flapdoodle" +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +And it'd respond: + +[source,js] +-------------------------------------------------- +{ + "tokens": [ + { + "token": "Flapdoodle", <1> + "start_offset": 5, + "end_offset": 15, + "type": "<ALPHANUM>", + "position": 1 <2> + } + ] +} +-------------------------------------------------- +// TESTRESPONSE + +<1> The token 'What' has been removed from the tokenstream because it does not +match the predicate. +<2> The position and offset values are unaffected by the removal of earlier tokens. \ No newline at end of file diff --git a/docs/reference/query-dsl/match-query.asciidoc b/docs/reference/query-dsl/match-query.asciidoc index acff4d3b036c5..5c397d603bef3 100644 --- a/docs/reference/query-dsl/match-query.asciidoc +++ b/docs/reference/query-dsl/match-query.asciidoc @@ -172,7 +172,7 @@ GET /_search The example above creates a boolean query: -`(ny OR (new AND york)) city)` +`(ny OR (new AND york)) city` that matches documents with the term `ny` or the conjunction `new AND york`. By default the parameter `auto_generate_synonyms_phrase_query` is set to `true`. diff --git a/docs/reference/search/suggesters/context-suggest.asciidoc b/docs/reference/search/suggesters/context-suggest.asciidoc index 2b522062ec06c..ab52097a4c504 100644 --- a/docs/reference/search/suggesters/context-suggest.asciidoc +++ b/docs/reference/search/suggesters/context-suggest.asciidoc @@ -13,6 +13,9 @@ Every context mapping has a unique name and a type. There are two types: `catego and `geo`. Context mappings are configured under the `contexts` parameter in the field mapping. +NOTE: It is mandatory to provide a context when indexing and querying + a context enabled completion field. + The following defines types, each with two context mappings for a completion field: @@ -84,10 +87,6 @@ PUT place_path_category NOTE: Adding context mappings increases the index size for completion field. The completion index is entirely heap resident, you can monitor the completion field index size using <>. -NOTE: deprecated[7.0.0, Indexing a suggestion without context on a context enabled completion field is deprecated -and will be removed in the next major release. If you want to index a suggestion that matches all contexts you should -add a special context for it.] - [[suggester-context-category]] [float] ==== Category Context @@ -160,9 +159,9 @@ POST place/_search?pretty // CONSOLE // TEST[continued] -Note: deprecated[7.0.0, When no categories are provided at query-time, all indexed documents are considered. -Querying with no categories on a category enabled completion field is deprecated and will be removed in the next major release -as it degrades search performance considerably.]
+NOTE: If multiple categories or category contexts are set on the query +they are merged as a disjunction. This means that suggestions match +if they contain at least one of the provided context values. Suggestions with certain categories can be boosted higher than others. The following filters suggestions by categories and additionally boosts @@ -218,6 +217,9 @@ multiple category context clauses. The following parameters are supported for a so on, by specifying a category prefix of 'type'. Defaults to `false` +NOTE: If a suggestion entry matches multiple contexts the final score is computed as the +maximum score produced by any matching contexts. + [[suggester-context-geo]] [float] ==== Geo location Context @@ -307,6 +309,10 @@ POST place/_search NOTE: When a location with a lower precision at query time is specified, all suggestions that fall within the area will be considered. +NOTE: If multiple categories or category contexts are set on the query +they are merged as a disjunction. This means that suggestions match +if they contain at least one of the provided context values. + Suggestions that are within an area represented by a geohash can also be boosted higher than others, as shown by the following: @@ -349,6 +355,9 @@ POST place/_search?pretty that fall under the geohash representation of '(43.6624803, -79.3863353)' with a default precision of '6' by a factor of `2` +NOTE: If a suggestion entry matches multiple contexts the final score is computed as the +maximum score produced by any matching contexts. + In addition to accepting context values, a context query can be composed of multiple context clauses. The following parameters are supported for a `category` context clause: diff --git a/docs/reference/setup/install/windows.asciidoc b/docs/reference/setup/install/windows.asciidoc index f2e9077e20ed1..dffdc48fe7bb0 100644 --- a/docs/reference/setup/install/windows.asciidoc +++ b/docs/reference/setup/install/windows.asciidoc @@ -295,8 +295,9 @@ as _properties_ within Windows Installer documentation) that can be passed to `m `SKIPSETTINGPASSWORDS`:: - When installing with a `Trial` license and X-Pack Security enabled, whether the - installation should skip setting up the built-in users `elastic`, `kibana` and `logstash_system`. + When installing with a `Trial` license and {security} enabled, whether the + installation should skip setting up the built-in users `elastic`, `kibana`, + `logstash_system`, `apm_system`, and `beats_system`. Defaults to `false` `ELASTICUSERPASSWORD`:: diff --git a/docs/reference/setup/setup-xclient.asciidoc b/docs/reference/setup/setup-xclient.asciidoc index 4b38e869e25fc..4282264e39524 100644 --- a/docs/reference/setup/setup-xclient.asciidoc +++ b/docs/reference/setup/setup-xclient.asciidoc @@ -11,7 +11,7 @@ cluster where {xpack} is installed, then you must download and configure the . Add the {xpack} transport JAR file to your *CLASSPATH*. You can download the {xpack} distribution and extract the JAR file manually or you can get it from the -https://artifacts.elastic.co/maven/org/elasticsearch/client/x-pack-transport/{version}/x-pack-transport-{version}.jar[Elasticsearc Maven repository]. +https://artifacts.elastic.co/maven/org/elasticsearch/client/x-pack-transport/{version}/x-pack-transport-{version}.jar[Elasticsearch Maven repository]. As with any dependency, you will also need its transitive dependencies.
Refer to the https://artifacts.elastic.co/maven/org/elasticsearch/client/x-pack-transport/{version}/x-pack-transport-{version}.pom[X-Pack POM file for your version] when downloading for offline usage. diff --git a/docs/reference/upgrade/set-paths-tip.asciidoc b/docs/reference/upgrade/set-paths-tip.asciidoc index adfe3e29dac3a..2dd120767c268 100644 --- a/docs/reference/upgrade/set-paths-tip.asciidoc +++ b/docs/reference/upgrade/set-paths-tip.asciidoc @@ -2,7 +2,7 @@ ================================================ When you extract the zip or tarball packages, the `elasticsearch-n.n.n` -directory contains the Elasticsearh `config`, `data`, `logs` and +directory contains the Elasticsearch `config`, `data`, `logs` and `plugins` directories. We recommend moving these directories out of the Elasticsearch directory diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/AnalysisPredicateScript.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/AnalysisPredicateScript.java index 7de588a958c77..3bda6f393bfdf 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/AnalysisPredicateScript.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/AnalysisPredicateScript.java @@ -19,6 +19,13 @@ package org.elasticsearch.analysis.common; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.analysis.tokenattributes.KeywordAttribute; +import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; +import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; +import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute; +import org.apache.lucene.analysis.tokenattributes.TypeAttribute; +import org.apache.lucene.util.AttributeSource; import org.elasticsearch.script.ScriptContext; /** @@ -30,21 +37,40 @@ public abstract class AnalysisPredicateScript { * Encapsulation of the state of the current token */ public static class Token { - public CharSequence term; - public int pos; - public int posInc; - public int posLen; - public int startOffset; - public int endOffset; - public String type; - public boolean isKeyword; + + private final CharTermAttribute termAtt; + private final PositionIncrementAttribute posIncAtt; + private final PositionLengthAttribute posLenAtt; + private final OffsetAttribute offsetAtt; + private final TypeAttribute typeAtt; + private final KeywordAttribute keywordAtt; + + // posInc is always 1 at the beginning of a tokenstream and the convention + // from the _analyze endpoint is that tokenstream positions are 0-based + private int pos = -1; + + /** + * Create a token exposing values from an AttributeSource + */ + public Token(AttributeSource source) { + this.termAtt = source.addAttribute(CharTermAttribute.class); + this.posIncAtt = source.addAttribute(PositionIncrementAttribute.class); + this.posLenAtt = source.addAttribute(PositionLengthAttribute.class); + this.offsetAtt = source.addAttribute(OffsetAttribute.class); + this.typeAtt = source.addAttribute(TypeAttribute.class); + this.keywordAtt = source.addAttribute(KeywordAttribute.class); + } + + public void updatePosition() { + this.pos = this.pos + posIncAtt.getPositionIncrement(); + } public CharSequence getTerm() { - return term; + return termAtt; } public int getPositionIncrement() { - return posInc; + return posIncAtt.getPositionIncrement(); } public int getPosition() { @@ -52,23 +78,23 @@ public int getPosition() { } public int getPositionLength()
{ - return posLen; + return posLenAtt.getPositionLength(); } public int getStartOffset() { - return startOffset; + return offsetAtt.startOffset(); } public int getEndOffset() { - return endOffset; + return offsetAtt.endOffset(); } public String getType() { - return type; + return typeAtt.type(); } public boolean isKeyword() { - return isKeyword; + return keywordAtt.isKeyword(); } } diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index 75ebade0b12bd..175935258ad6e 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -264,6 +264,8 @@ public Map> getTokenFilters() { filters.put("pattern_replace", requiresAnalysisSettings(PatternReplaceTokenFilterFactory::new)); filters.put("persian_normalization", PersianNormalizationFilterFactory::new); filters.put("porter_stem", PorterStemTokenFilterFactory::new); + filters.put("predicate_token_filter", + requiresAnalysisSettings((i, e, n, s) -> new PredicateTokenFilterScriptFactory(i, n, s, scriptService.get()))); filters.put("remove_duplicates", RemoveDuplicatesTokenFilterFactory::new); filters.put("reverse", ReverseTokenFilterFactory::new); filters.put("russian_stem", RussianStemTokenFilterFactory::new); diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PredicateTokenFilterScriptFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PredicateTokenFilterScriptFactory.java new file mode 100644 index 0000000000000..84f4bb487060c --- /dev/null +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PredicateTokenFilterScriptFactory.java @@ -0,0 +1,73 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.analysis.common; + +import org.apache.lucene.analysis.FilteringTokenFilter; +import org.apache.lucene.analysis.TokenStream; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; + +import java.io.IOException; + +/** + * A factory for creating FilteringTokenFilters that determine whether or not to + * accept their underlying token by consulting a script + */ +public class PredicateTokenFilterScriptFactory extends AbstractTokenFilterFactory { + + private final AnalysisPredicateScript.Factory factory; + + public PredicateTokenFilterScriptFactory(IndexSettings indexSettings, String name, Settings settings, ScriptService scriptService) { + super(indexSettings, name, settings); + Settings scriptSettings = settings.getAsSettings("script"); + Script script = Script.parse(scriptSettings); + if (script.getType() != ScriptType.INLINE) { + throw new IllegalArgumentException("Cannot use stored scripts in tokenfilter [" + name + "]"); + } + this.factory = scriptService.compile(script, AnalysisPredicateScript.CONTEXT); + } + + @Override + public TokenStream create(TokenStream tokenStream) { + return new ScriptFilteringTokenFilter(tokenStream, factory.newInstance()); + } + + private static class ScriptFilteringTokenFilter extends FilteringTokenFilter { + + final AnalysisPredicateScript script; + final AnalysisPredicateScript.Token token; + + ScriptFilteringTokenFilter(TokenStream in, AnalysisPredicateScript script) { + super(in); + this.script = script; + this.token = new AnalysisPredicateScript.Token(this); + } + + @Override + protected boolean accept() throws IOException { + token.updatePosition(); + return script.execute(token); + } + } +} diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterFactory.java index cf7fd5b047a89..56f60bb874a5b 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterFactory.java @@ -21,12 +21,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.ConditionalTokenFilter; -import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.analysis.tokenattributes.KeywordAttribute; -import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; -import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute; -import org.apache.lucene.analysis.tokenattributes.TypeAttribute; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; @@ -36,6 +30,7 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptType; +import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -76,30 +71,26 @@ public TokenStream create(TokenStream tokenStream) { } return in; }; - AnalysisPredicateScript script = factory.newInstance(); - final 
AnalysisPredicateScript.Token token = new AnalysisPredicateScript.Token(); - return new ConditionalTokenFilter(tokenStream, filter) { + return new ScriptedConditionTokenFilter(tokenStream, filter, factory.newInstance()); + } - CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); - PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class); - PositionLengthAttribute posLenAtt = addAttribute(PositionLengthAttribute.class); - OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); - TypeAttribute typeAtt = addAttribute(TypeAttribute.class); - KeywordAttribute keywordAtt = addAttribute(KeywordAttribute.class); + private static class ScriptedConditionTokenFilter extends ConditionalTokenFilter { - @Override - protected boolean shouldFilter() { - token.term = termAtt; - token.posInc = posIncAtt.getPositionIncrement(); - token.pos += token.posInc; - token.posLen = posLenAtt.getPositionLength(); - token.startOffset = offsetAtt.startOffset(); - token.endOffset = offsetAtt.endOffset(); - token.type = typeAtt.type(); - token.isKeyword = keywordAtt.isKeyword(); - return script.execute(token); - } - }; + private final AnalysisPredicateScript script; + private final AnalysisPredicateScript.Token token; + + ScriptedConditionTokenFilter(TokenStream input, Function<TokenStream, TokenStream> inputFactory, + AnalysisPredicateScript script) { + super(input, inputFactory); + this.script = script; + this.token = new AnalysisPredicateScript.Token(this); + } + + @Override + protected boolean shouldFilter() throws IOException { + token.updatePosition(); + return script.execute(token); + } } @Override diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PredicateTokenScriptFilterTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PredicateTokenScriptFilterTests.java new file mode 100644 index 0000000000000..18afbdcecb3e6 --- /dev/null +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PredicateTokenScriptFilterTests.java @@ -0,0 +1,89 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.analysis.common; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.IndexAnalyzers; +import org.elasticsearch.index.analysis.NamedAnalyzer; +import org.elasticsearch.indices.analysis.AnalysisModule; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.test.ESTokenStreamTestCase; +import org.elasticsearch.test.IndexSettingsModule; + +import java.io.IOException; +import java.util.Collections; + +public class PredicateTokenScriptFilterTests extends ESTokenStreamTestCase { + + public void testSimpleFilter() throws IOException { + Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put("index.analysis.filter.f.type", "predicate_token_filter") + .put("index.analysis.filter.f.script.source", "token.getTerm().length() > 5") + .put("index.analysis.analyzer.myAnalyzer.type", "custom") + .put("index.analysis.analyzer.myAnalyzer.tokenizer", "standard") + .putList("index.analysis.analyzer.myAnalyzer.filter", "f") + .build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings); + + AnalysisPredicateScript.Factory factory = () -> new AnalysisPredicateScript() { + @Override + public boolean execute(Token token) { + return token.getTerm().length() > 5; + } + }; + + @SuppressWarnings("unchecked") + ScriptService scriptService = new ScriptService(indexSettings, Collections.emptyMap(), Collections.emptyMap()){ + @Override + public <FactoryType> FactoryType compile(Script script, ScriptContext<FactoryType> context) { + assertEquals(context, AnalysisPredicateScript.CONTEXT); + assertEquals(new Script("token.getTerm().length() > 5"), script); + return (FactoryType) factory; + } + }; + + CommonAnalysisPlugin plugin = new CommonAnalysisPlugin(); + plugin.createComponents(null, null, null, null, scriptService, null, null, null, null); + AnalysisModule module + = new AnalysisModule(TestEnvironment.newEnvironment(settings), Collections.singletonList(plugin)); + + IndexAnalyzers analyzers = module.getAnalysisRegistry().build(idxSettings); + + try (NamedAnalyzer analyzer = analyzers.get("myAnalyzer")) { + assertNotNull(analyzer); + assertAnalyzesTo(analyzer, "Vorsprung Durch Technik", new String[]{ + "Vorsprung", "Technik" + }); + } + + } + +} diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/60_analysis_scripting.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/60_analysis_scripting.yml index 4305e5db0af37..2015fe31fccb5 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/60_analysis_scripting.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/60_analysis_scripting.yml @@ -28,9 +28,44 @@ - type: condition filter: [ "lowercase" ] script: - source: "token.position > 1 && 
token.positionIncrement > 0 && token.startOffset > 0 && token.endOffset > 0 && (token.positionLength == 1 || token.type == \"a\" || token.keyword)" - length: { tokens: 3 } - match: { tokens.0.token: "Vorsprung" } - match: { tokens.1.token: "durch" } - match: { tokens.2.token: "technik" } + +--- +"script_filter": + - do: + indices.analyze: + body: + text: "Vorsprung Durch Technik" + tokenizer: "whitespace" + filter: + - type: predicate_token_filter + script: + source: "token.term.length() > 5" + + - length: { tokens: 2 } + - match: { tokens.0.token: "Vorsprung" } + - match: { tokens.1.token: "Technik" } + +--- +"script_filter_position": + - do: + indices.analyze: + body: + text: "a b c d e f g h" + tokenizer: "whitespace" + filter: + - type: predicate_token_filter + script: + source: "token.position >= 4" + + - length: { tokens: 4 } + - match: { tokens.0.token: "e" } + - match: { tokens.1.token: "f" } + - match: { tokens.2.token: "g" } + - match: { tokens.3.token: "h" } + diff --git a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-4d78db26be.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-4d78db26be.jar.sha1 deleted file mode 100644 index bec50d36793d8..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-4d78db26be.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5f469e925dde5dff81b9d56f465a8babb56cd26b \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-66c671ea80.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 0000000000000..047bca7b614bf --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +58b9db095c569b4c4da491810f14e1429878b594 \ No newline at end of file diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java index 23dc0fd276cbe..55f8deb059293 100644 --- a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java @@ -26,7 +26,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.queries.function.ValueSource; import org.apache.lucene.queries.function.valuesource.DoubleConstValueSource; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.apache.lucene.search.SortField; import org.elasticsearch.SpecialPermission; import org.elasticsearch.common.Nullable; @@ -336,7 +336,7 @@ public void setDocument(int docid) { } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { script.setScorer(scorer); } diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index ed4b1d631e064..6bec6f5062685 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -26,6 +26,7 @@ integTestCluster { module project.project(':modules:mapper-extras') systemProperty 'es.scripting.use_java_time', 'true' systemProperty 'es.scripting.update.ctx_in_params', 'false' + systemProperty 'es.http.cname_in_publish_address', 'true' } dependencies { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScoreTests.java 
b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScoreTests.java index 76bb6d14dcf61..3d19dedd3b0a3 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScoreTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScoreTests.java @@ -19,39 +19,25 @@ package org.elasticsearch.painless; -import org.apache.lucene.search.DocIdSetIterator; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; -import java.io.IOException; import java.util.Collections; public class ScoreTests extends ScriptTestCase { /** Most of a dummy scorer impl that requires overriding just score(). */ - abstract class MockScorer extends Scorer { - MockScorer() { - super(null); - } + abstract class MockScorer extends Scorable { @Override public int docID() { return 0; } - @Override - public DocIdSetIterator iterator() { - throw new UnsupportedOperationException(); - } } public void testScoreWorks() { assertEquals(2.5, exec("_score", Collections.emptyMap(), Collections.emptyMap(), new MockScorer() { @Override - public float score() throws IOException { - return 2.5f; - } - - @Override - public float getMaxScore(int upTo) throws IOException { + public float score() { return 2.5f; } }, @@ -62,14 +48,9 @@ public void testScoreNotUsed() { assertEquals(3.5, exec("3.5", Collections.emptyMap(), Collections.emptyMap(), new MockScorer() { @Override - public float score() throws IOException { + public float score() { throw new AssertionError("score() should not be called"); } - - @Override - public float getMaxScore(int upTo) throws IOException { - return Float.MAX_VALUE; - } }, true)); } @@ -79,17 +60,12 @@ public void testScoreCached() { new MockScorer() { private boolean used = false; @Override - public float score() throws IOException { + public float score() { if (used == false) { return 4.5f; } throw new AssertionError("score() should not be called twice"); } - - @Override - public float getMaxScore(int upTo) throws IOException { - return 4.5f; - } }, true)); } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java index 963a433f172e8..577b120fc9024 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java @@ -20,7 +20,7 @@ package org.elasticsearch.painless; import junit.framework.AssertionFailedError; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.elasticsearch.common.lucene.ScorerAware; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.painless.antlr.Walker; @@ -91,7 +91,7 @@ public Object exec(String script, Map vars, boolean picky) { } /** Compiles and returns the result of {@code script} with access to {@code vars} and compile-time parameters */ - public Object exec(String script, Map vars, Map compileParams, Scorer scorer, boolean picky) { + public Object exec(String script, Map vars, Map compileParams, Scorable scorer, boolean picky) { // test for ambiguity errors before running the actual script if picky is true if (picky) { ScriptClassInfo scriptClassInfo = new ScriptClassInfo(PAINLESS_LOOKUP, GenericElasticsearchScript.class); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java 
b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java index 4820bc10cf24f..5c6fbc54667f2 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java @@ -19,13 +19,11 @@ package org.elasticsearch.painless; -import org.apache.lucene.search.DocIdSetIterator; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptedMetricAggContexts; -import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -66,20 +64,12 @@ public void testMapBasic() { Map params = new HashMap<>(); Map state = new HashMap<>(); - Scorer scorer = new Scorer(null) { + Scorable scorer = new Scorable() { @Override public int docID() { return 0; } @Override public float score() { return 0.5f; } - - @Override - public DocIdSetIterator iterator() { return null; } - - @Override - public float getMaxScore(int upTo) throws IOException { - return 0.5f; - } }; ScriptedMetricAggContexts.MapScript.LeafFactory leafFactory = factory.newFactory(params, state, null); diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java index 4469c9633dd87..064d1d1e5977c 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java @@ -21,9 +21,9 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedSetDocValues; -import org.apache.lucene.search.ConstantScoreScorer; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; @@ -148,7 +148,17 @@ protected void doPostCollection() throws IOException { final SortedSetDocValues globalOrdinals = valuesSource.globalOrdinalsValues(ctx); // Set the scorer, since we now replay only the child docIds - sub.setScorer(new ConstantScoreScorer(null, 1f, childDocsIter)); + sub.setScorer(new Scorable() { + @Override + public float score() { + return 1f; + } + + @Override + public int docID() { + return childDocsIter.docID(); + } + }); final Bits liveDocs = ctx.reader().getLiveDocs(); for (int docId = childDocsIter diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-4d78db26be.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-4d78db26be.jar.sha1 deleted file mode 100644 index be2e7ec355ac5..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-4d78db26be.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -97a3758487272ba4d15720b0ca15b0f980310c89 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-66c671ea80.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 0000000000000..7369f427ab208 --- /dev/null +++ 
b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +f009ee188453aabae77fad55aea08bc60323bb3e \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-4d78db26be.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-4d78db26be.jar.sha1 deleted file mode 100644 index a7f63df28d7e5..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-4d78db26be.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -12ed739794cd317754684308ddc5bdbdcc46cdde \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-66c671ea80.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 0000000000000..16417bbebd1c2 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +af3d2ae975e3560c1ea69222d6c46072857952ba \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-4d78db26be.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-4d78db26be.jar.sha1 deleted file mode 100644 index 8fc57bbf7e46d..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-4d78db26be.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4da6e5c17a17f0a9a99b518ea9985ea06996b63b \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-66c671ea80.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 0000000000000..9c3524a6789f8 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +f17bc5e532d9dc2786a13bd577df64023d1baae1 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-4d78db26be.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-4d78db26be.jar.sha1 deleted file mode 100644 index d94b274bf13ff..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-4d78db26be.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a36b2db18a2a22966ab0bf9fced775f22dd7029d \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-66c671ea80.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 0000000000000..ac81fdd07c2e4 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +7ad89d33c1cd960c91afa05b22024137fe108567 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-4d78db26be.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-4d78db26be.jar.sha1 deleted file mode 100644 index f75d7abd6a36b..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-4d78db26be.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5f1d360a47d2fd166e970d17c46b284830e64258 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-66c671ea80.jar.sha1 
b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 0000000000000..f00a29e781618 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +3f11fb254256d74e911b953994b47e7a95915954 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-4d78db26be.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-4d78db26be.jar.sha1 deleted file mode 100644 index 2e3943cf79345..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-4d78db26be.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b07883b5e988d1d991503aa49d9b59059518825d \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-66c671ea80.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 0000000000000..76fa8e90eae98 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +b2348d140ef0c3e674cb81173f61c5e5f430facb \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-4d78db26be.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-4d78db26be.jar.sha1 deleted file mode 100644 index 1d21c6e5b613c..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-4d78db26be.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1b46b3ee62932de7ba7b670820a13eb973ec5777 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-66c671ea80.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 0000000000000..0e2c4d34ef041 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +485a0c3be58a5942b4a28639f1019181ef4cd0e3 \ No newline at end of file diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 5d248b22caf16..c56a9a8259af3 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -32,19 +32,23 @@ esplugin { } versions << [ - 'aws': '1.11.223' + 'aws': '1.11.406' ] dependencies { compile "com.amazonaws:aws-java-sdk-s3:${versions.aws}" compile "com.amazonaws:aws-java-sdk-kms:${versions.aws}" compile "com.amazonaws:aws-java-sdk-core:${versions.aws}" + compile "com.amazonaws:jmespath-java:${versions.aws}" compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" compile "commons-logging:commons-logging:${versions.commonslogging}" compile "commons-codec:commons-codec:${versions.commonscodec}" + compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" compile 'com.fasterxml.jackson.core:jackson-databind:2.6.7.1' compile 'com.fasterxml.jackson.core:jackson-annotations:2.6.0' + compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}" + compile 'joda-time:joda-time:2.10' // HACK: javax.xml.bind was removed from default modules in java 9, so we pull the api in here, // and whitelist this hack in JarHell @@ -53,6 +57,7 @@ dependencies { dependencyLicenses { mapping from: 
/aws-java-sdk-.*/, to: 'aws-java-sdk' + mapping from: /jmespath-java.*/, to: 'aws-java-sdk' mapping from: /jackson-.*/, to: 'jackson' mapping from: /jaxb-.*/, to: 'jaxb' } diff --git a/plugins/repository-s3/licenses/aws-java-sdk-core-1.11.223.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-core-1.11.223.jar.sha1 deleted file mode 100644 index 9890dd8d600b3..0000000000000 --- a/plugins/repository-s3/licenses/aws-java-sdk-core-1.11.223.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c3993cb44f5856fa721b7b7ccfc266377c0bf9c0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-java-sdk-core-1.11.406.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-core-1.11.406.jar.sha1 new file mode 100644 index 0000000000000..415373b275ead --- /dev/null +++ b/plugins/repository-s3/licenses/aws-java-sdk-core-1.11.406.jar.sha1 @@ -0,0 +1 @@ +43f3b7332d4d527bbf34d4ac6be094f3dabec6de \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.223.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.223.jar.sha1 deleted file mode 100644 index d5bc9d30308dc..0000000000000 --- a/plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.223.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c24e6ebe108c60a08098aeaad5ae0b6a5a77b618 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.406.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.406.jar.sha1 new file mode 100644 index 0000000000000..f0eb9b7175238 --- /dev/null +++ b/plugins/repository-s3/licenses/aws-java-sdk-kms-1.11.406.jar.sha1 @@ -0,0 +1 @@ +e29854e58dc20f5453c1da7e580a5921b1e9714a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-java-sdk-s3-1.11.223.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-s3-1.11.223.jar.sha1 deleted file mode 100644 index fe12b2d4847fa..0000000000000 --- a/plugins/repository-s3/licenses/aws-java-sdk-s3-1.11.223.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c2ef96732e22d97952fbcd0a94f1dc376d157eda \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-java-sdk-s3-1.11.406.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-s3-1.11.406.jar.sha1 new file mode 100644 index 0000000000000..e57fd11c82980 --- /dev/null +++ b/plugins/repository-s3/licenses/aws-java-sdk-s3-1.11.406.jar.sha1 @@ -0,0 +1 @@ +5c3c2c57b076602b3aeef841c63e5848ec52b00d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jmespath-java-1.11.406.jar.sha1 b/plugins/repository-s3/licenses/jmespath-java-1.11.406.jar.sha1 new file mode 100644 index 0000000000000..bbb9b562a2fd9 --- /dev/null +++ b/plugins/repository-s3/licenses/jmespath-java-1.11.406.jar.sha1 @@ -0,0 +1 @@ +06c291d1029943d4968a36fadffa3b71a6d8b4e4 \ No newline at end of file diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java index b177686bd71a6..a431f4da1fdf8 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java @@ -23,10 +23,12 @@ import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.EC2ContainerCredentialsProviderWrapper; +import com.amazonaws.client.builder.AwsClientBuilder; import com.amazonaws.http.IdleConnectionReaper; import com.amazonaws.internal.StaticCredentialsProvider; import 
com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3Client; +import com.amazonaws.services.s3.AmazonS3ClientBuilder; +import com.amazonaws.services.s3.internal.Constants; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.MapBuilder; @@ -93,19 +95,26 @@ public AmazonS3Reference client(String clientName) { } } - private AmazonS3 buildClient(S3ClientSettings clientSettings) { - final AWSCredentialsProvider credentials = buildCredentials(logger, clientSettings); - final ClientConfiguration configuration = buildConfiguration(clientSettings); - final AmazonS3 client = buildClient(credentials, configuration); - if (Strings.hasText(clientSettings.endpoint)) { - client.setEndpoint(clientSettings.endpoint); - } - return client; - } - // proxy for testing - AmazonS3 buildClient(AWSCredentialsProvider credentials, ClientConfiguration configuration) { - return new AmazonS3Client(credentials, configuration); + AmazonS3 buildClient(final S3ClientSettings clientSettings) { + final AmazonS3ClientBuilder builder = AmazonS3ClientBuilder.standard(); + builder.withCredentials(buildCredentials(logger, clientSettings)); + builder.withClientConfiguration(buildConfiguration(clientSettings)); + + final String endpoint = Strings.hasLength(clientSettings.endpoint) ? clientSettings.endpoint : Constants.S3_HOSTNAME; + logger.debug("using endpoint [{}]", endpoint); + + // If the endpoint configuration isn't set on the builder then the default behaviour is to try + // and work out what region we are in and use an appropriate endpoint - see AwsClientBuilder#setRegion. + // In contrast, directly-constructed clients use s3.amazonaws.com unless otherwise instructed. We currently + // use a directly-constructed client, and need to keep the existing behaviour to avoid a breaking change, + // so to move to using the builder we must set it explicitly to keep the existing behaviour. + + // + // We do this because directly constructing the client is deprecated (was already deprecated in 1.11.223 too) + // so this change removes that usage of a deprecated API.
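+ // (Assumption, not verified here: the null signing region passed to the EndpointConfiguration
+ // below leaves the SDK to derive the signing region from the endpoint itself.)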
+ builder.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint, null)); + + return builder.build(); } // pkg private for tests diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java index 7eb603b4b78e5..17797a5758312 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.repositories.s3; -import com.amazonaws.ClientConfiguration; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.services.s3.AmazonS3; @@ -70,9 +69,9 @@ static final class ProxyS3Service extends S3Service { } @Override - AmazonS3 buildClient(AWSCredentialsProvider credentials, ClientConfiguration configuration) { - final AmazonS3 client = super.buildClient(credentials, configuration); - return new ClientAndCredentials(client, credentials); + AmazonS3 buildClient(final S3ClientSettings clientSettings) { + final AmazonS3 client = super.buildClient(clientSettings); + return new ClientAndCredentials(client, buildCredentials(logger, clientSettings)); } } diff --git a/qa/full-cluster-restart/build.gradle b/qa/full-cluster-restart/build.gradle index ca8371e30e7ac..8b305462e4dff 100644 --- a/qa/full-cluster-restart/build.gradle +++ b/qa/full-cluster-restart/build.gradle @@ -53,9 +53,6 @@ for (Version version : bwcVersions.indexCompatible) { // some tests rely on the translog not being flushed setting 'indices.memory.shard_inactive_time', '20m' - // debug logging for testRecovery - setting 'logger.level', 'DEBUG' - if (version.onOrAfter('5.3.0')) { setting 'http.content_type.required', 'true' } @@ -75,9 +72,6 @@ for (Version version : bwcVersions.indexCompatible) { // some tests rely on the translog not being flushed setting 'indices.memory.shard_inactive_time', '20m' - // debug logging for testRecovery - setting 'logger.level', 'DEBUG' - numNodes = 2 dataDir = { nodeNum -> oldClusterTest.nodes[nodeNum].dataDir } cleanShared = false // We want to keep snapshots made by the old cluster! diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 80bed9db5f3da..7efebd1d54ae8 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -68,10 +68,8 @@ * version is started with the same data directories and then this is rerun * with {@code tests.is_old_cluster} set to {@code false}. 
*/ -public class FullClusterRestartIT extends ESRestTestCase { - private final boolean runningAgainstOldCluster = Booleans.parseBoolean(System.getProperty("tests.is_old_cluster")); - private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version")); - private final boolean supportsLenientBooleans = oldClusterVersion.before(Version.V_6_0_0_alpha1); +public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { + private final boolean supportsLenientBooleans = getOldClusterVersion().before(Version.V_6_0_0_alpha1); private static final Version VERSION_5_1_0_UNRELEASED = Version.fromString("5.1.0"); private String index; @@ -81,29 +79,9 @@ public void setIndex() { index = getTestName().toLowerCase(Locale.ROOT); } - @Override - protected boolean preserveIndicesUponCompletion() { - return true; - } - - @Override - protected boolean preserveSnapshotsUponCompletion() { - return true; - } - - @Override - protected boolean preserveReposUponCompletion() { - return true; - } - - @Override - protected boolean preserveTemplatesUponCompletion() { - return true; - } - public void testSearch() throws Exception { int count; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); mappingsAndSettings.startObject(); { @@ -169,7 +147,7 @@ public void testSearch() throws Exception { } public void testNewReplicasWork() throws Exception { - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); mappingsAndSettings.startObject(); { @@ -237,10 +215,10 @@ public void testNewReplicasWork() throws Exception { */ public void testAliasWithBadName() throws Exception { assumeTrue("Can only test bad alias name if old cluster is on 5.1.0 or before", - oldClusterVersion.before(VERSION_5_1_0_UNRELEASED)); + getOldClusterVersion().before(VERSION_5_1_0_UNRELEASED)); int count; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); mappingsAndSettings.startObject(); { @@ -291,7 +269,7 @@ public void testAliasWithBadName() throws Exception { Map searchRsp = entityAsMap(client().performRequest(new Request("GET", "/" + aliasName + "/_search"))); int totalHits = (int) XContentMapValues.extractValue("hits.total", searchRsp); assertEquals(count, totalHits); - if (runningAgainstOldCluster == false) { + if (isRunningAgainstOldCluster() == false) { // We can remove the alias. Response response = client().performRequest(new Request("DELETE", "/" + index + "/_alias/" + aliasName)); assertEquals(200, response.getStatusLine().getStatusCode()); @@ -302,7 +280,7 @@ public void testAliasWithBadName() throws Exception { } public void testClusterState() throws Exception { - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); mappingsAndSettings.startObject(); mappingsAndSettings.field("template", index); @@ -341,14 +319,14 @@ public void testClusterState() throws Exception { assertEquals("0", numberOfReplicas); Version version = Version.fromId(Integer.valueOf((String) XContentMapValues.extractValue("metadata.indices." 
+ index + ".settings.index.version.created", clusterState))); - assertEquals(oldClusterVersion, version); + assertEquals(getOldClusterVersion(), version); } public void testShrink() throws IOException { String shrunkenIndex = index + "_shrunk"; int numDocs; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); mappingsAndSettings.startObject(); { @@ -413,7 +391,7 @@ public void testShrink() throws IOException { public void testShrinkAfterUpgrade() throws IOException { String shrunkenIndex = index + "_shrunk"; int numDocs; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); mappingsAndSettings.startObject(); { @@ -465,7 +443,7 @@ public void testShrinkAfterUpgrade() throws IOException { int totalHits = (int) XContentMapValues.extractValue("hits.total", response); assertEquals(numDocs, totalHits); - if (runningAgainstOldCluster == false) { + if (isRunningAgainstOldCluster() == false) { response = entityAsMap(client().performRequest(new Request("GET", "/" + shrunkenIndex + "/_search"))); assertNoFailures(response); totalShards = (int) XContentMapValues.extractValue("_shards.total", response); @@ -490,7 +468,7 @@ public void testShrinkAfterUpgrade() throws IOException { * */ public void testRollover() throws IOException { - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { Request createIndex = new Request("PUT", "/" + index + "-000001"); createIndex.setJsonEntity("{" + " \"aliases\": {" @@ -511,7 +489,7 @@ public void testRollover() throws IOException { bulkRequest.addParameter("refresh", ""); assertThat(EntityUtils.toString(client().performRequest(bulkRequest).getEntity()), containsString("\"errors\":false")); - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { Request rolloverRequest = new Request("POST", "/" + index + "_write/_rollover"); rolloverRequest.setJsonEntity("{" + " \"conditions\": {" @@ -529,7 +507,7 @@ public void testRollover() throws IOException { Map count = entityAsMap(client().performRequest(countRequest)); assertNoFailures(count); - int expectedCount = bulkCount + (runningAgainstOldCluster ? 0 : bulkCount); + int expectedCount = bulkCount + (isRunningAgainstOldCluster() ? 
0 : bulkCount); assertEquals(expectedCount, (int) XContentMapValues.extractValue("hits.total", count)); } @@ -688,7 +666,7 @@ public void testSingleDoc() throws IOException { String docLocation = "/" + index + "/doc/1"; String doc = "{\"test\": \"test\"}"; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { Request createDoc = new Request("PUT", docLocation); createDoc.setJsonEntity(doc); client().performRequest(createDoc); @@ -703,7 +681,7 @@ public void testSingleDoc() throws IOException { public void testEmptyShard() throws IOException { final String index = "test_empty_shard"; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { Settings.Builder settings = Settings.builder() .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) @@ -726,7 +704,7 @@ public void testEmptyShard() throws IOException { public void testRecovery() throws Exception { int count; boolean shouldHaveTranslog; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { count = between(200, 300); /* We've had bugs in the past where we couldn't restore * an index without a translog so we randomize whether @@ -772,7 +750,7 @@ public void testRecovery() throws Exception { String countResponse = toStr(client().performRequest(countRequest)); assertThat(countResponse, containsString("\"total\":" + count)); - if (false == runningAgainstOldCluster) { + if (false == isRunningAgainstOldCluster()) { boolean restoredFromTranslog = false; boolean foundPrimary = false; Request recoveryRequest = new Request("GET", "/_cat/recovery/" + index); @@ -800,7 +778,7 @@ public void testRecovery() throws Exception { assertEquals("mismatch while checking for translog recovery\n" + recoveryResponse, shouldHaveTranslog, restoredFromTranslog); String currentLuceneVersion = Version.CURRENT.luceneVersion.toString(); - String bwcLuceneVersion = oldClusterVersion.luceneVersion.toString(); + String bwcLuceneVersion = getOldClusterVersion().luceneVersion.toString(); if (shouldHaveTranslog && false == currentLuceneVersion.equals(bwcLuceneVersion)) { int numCurrentVersion = 0; int numBwcVersion = 0; @@ -840,7 +818,7 @@ public void testRecovery() throws Exception { */ public void testSnapshotRestore() throws IOException { int count; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { // Create the index count = between(200, 300); indexRandomDocuments(count, true, true, i -> jsonBuilder().startObject().field("field", "value").endObject()); @@ -860,7 +838,7 @@ public void testSnapshotRestore() throws IOException { // Stick a routing attribute into the cluster settings so we can see it after the restore Request addRoutingSettings = new Request("PUT", "/_cluster/settings"); addRoutingSettings.setJsonEntity( - "{\"persistent\": {\"cluster.routing.allocation.exclude.test_attr\": \"" + oldClusterVersion + "\"}}"); + "{\"persistent\": {\"cluster.routing.allocation.exclude.test_attr\": \"" + getOldClusterVersion() + "\"}}"); client().performRequest(addRoutingSettings); // Stick a template into the cluster so we can see it after the restore @@ -885,7 +863,7 @@ public void testSnapshotRestore() throws IOException { templateBuilder.startObject("alias2"); { templateBuilder.startObject("filter"); { templateBuilder.startObject("term"); { - templateBuilder.field("version", runningAgainstOldCluster ? oldClusterVersion : Version.CURRENT); + templateBuilder.field("version", isRunningAgainstOldCluster() ?
getOldClusterVersion() : Version.CURRENT); } templateBuilder.endObject(); } @@ -898,7 +876,7 @@ public void testSnapshotRestore() throws IOException { createTemplateRequest.setJsonEntity(Strings.toString(templateBuilder)); client().performRequest(createTemplateRequest); - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { // Create the repo XContentBuilder repoConfig = JsonXContent.contentBuilder().startObject(); { repoConfig.field("type", "fs"); @@ -914,19 +892,19 @@ public void testSnapshotRestore() throws IOException { client().performRequest(createRepoRequest); } - Request createSnapshot = new Request("PUT", "/_snapshot/repo/" + (runningAgainstOldCluster ? "old_snap" : "new_snap")); + Request createSnapshot = new Request("PUT", "/_snapshot/repo/" + (isRunningAgainstOldCluster() ? "old_snap" : "new_snap")); createSnapshot.addParameter("wait_for_completion", "true"); createSnapshot.setJsonEntity("{\"indices\": \"" + index + "\"}"); client().performRequest(createSnapshot); - checkSnapshot("old_snap", count, oldClusterVersion); - if (false == runningAgainstOldCluster) { + checkSnapshot("old_snap", count, getOldClusterVersion()); + if (false == isRunningAgainstOldCluster()) { checkSnapshot("new_snap", count, Version.CURRENT); } } public void testHistoryUUIDIsAdded() throws Exception { - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); mappingsAndSettings.startObject(); { @@ -1019,20 +997,14 @@ private void checkSnapshot(String snapshotName, int count, Version tookOnVersion Request clusterSettingsRequest = new Request("GET", "/_cluster/settings"); clusterSettingsRequest.addParameter("flat_settings", "true"); Map clusterSettingsResponse = entityAsMap(client().performRequest(clusterSettingsRequest)); - Map expectedClusterSettings = new HashMap<>(); - expectedClusterSettings.put("transient", emptyMap()); - expectedClusterSettings.put("persistent", - singletonMap("cluster.routing.allocation.exclude.test_attr", oldClusterVersion.toString())); - if (expectedClusterSettings.equals(clusterSettingsResponse) == false) { - NotEqualMessageBuilder builder = new NotEqualMessageBuilder(); - builder.compareMaps(clusterSettingsResponse, expectedClusterSettings); - fail("settings don't match:\n" + builder.toString()); - } + @SuppressWarnings("unchecked") final Map persistentSettings = + (Map)clusterSettingsResponse.get("persistent"); + assertThat(persistentSettings.get("cluster.routing.allocation.exclude.test_attr"), equalTo(getOldClusterVersion().toString())); // Check that the template was restored successfully Map getTemplateResponse = entityAsMap(client().performRequest(new Request("GET", "/_template/test_template"))); Map expectedTemplate = new HashMap<>(); - if (runningAgainstOldCluster && oldClusterVersion.before(Version.V_6_0_0_beta1)) { + if (isRunningAgainstOldCluster() && getOldClusterVersion().before(Version.V_6_0_0_beta1)) { expectedTemplate.put("template", "evil_*"); } else { expectedTemplate.put("index_patterns", singletonList("evil_*")); diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.java new file mode 100644 index 0000000000000..19fbdc92fae07 --- /dev/null +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.java @@ -0,0 +1,101 @@ +/* + * Licensed to Elasticsearch under one or 
more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.upgrades; + +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsResponse; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.transport.RemoteClusterService; + +import java.io.IOException; +import java.util.Collections; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.transport.RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_SEEDS; +import static org.elasticsearch.transport.RemoteClusterService.SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE; +import static org.hamcrest.Matchers.equalTo; + +public class FullClusterRestartSettingsUpgradeIT extends AbstractFullClusterRestartTestCase { + + public void testRemoteClusterSettingsUpgraded() throws IOException { + assumeTrue("settings automatically upgraded since 6.5.0", getOldClusterVersion().before(Version.V_6_5_0)); + if (isRunningAgainstOldCluster()) { + final Request putSettingsRequest = new Request("PUT", "/_cluster/settings"); + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.startObject("persistent"); + { + builder.field("search.remote.foo.skip_unavailable", true); + builder.field("search.remote.foo.seeds", Collections.singletonList("localhost:9200")); + } + builder.endObject(); + } + builder.endObject(); + putSettingsRequest.setJsonEntity(Strings.toString(builder)); + } + client().performRequest(putSettingsRequest); + + final Request getSettingsRequest = new Request("GET", "/_cluster/settings"); + final Response response = client().performRequest(getSettingsRequest); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, response.getEntity().getContent())) { + final ClusterGetSettingsResponse clusterGetSettingsResponse = ClusterGetSettingsResponse.fromXContent(parser); + final Settings settings = clusterGetSettingsResponse.getPersistentSettings(); + + assertTrue(SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").exists(settings)); + assertTrue(SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").get(settings)); + assertTrue(SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").exists(settings)); + assertThat( + SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").get(settings), + equalTo(Collections.singletonList("localhost:9200"))); + } + + 
assertSettingDeprecationsAndWarnings(new Setting<?>[]{ + SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo"), + SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo")}); + } else { + final Request getSettingsRequest = new Request("GET", "/_cluster/settings"); + final Response getSettingsResponse = client().performRequest(getSettingsRequest); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, getSettingsResponse.getEntity().getContent())) { + final ClusterGetSettingsResponse clusterGetSettingsResponse = ClusterGetSettingsResponse.fromXContent(parser); + final Settings settings = clusterGetSettingsResponse.getPersistentSettings(); + + assertFalse(SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").exists(settings)); + assertTrue( + settings.toString(), + RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").exists(settings)); + assertTrue(RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").get(settings)); + assertFalse(SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").exists(settings)); + assertTrue(RemoteClusterService.REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").exists(settings)); + assertThat( + RemoteClusterService.REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").get(settings), + equalTo(Collections.singletonList("localhost:9200"))); + } + } + } + +} diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java index 49a9dec870e75..2b7250f86b7cd 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java @@ -20,10 +20,8 @@ package org.elasticsearch.upgrades; import org.apache.http.util.EntityUtils; -import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; -import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; @@ -48,7 +46,6 @@ import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; import org.elasticsearch.index.query.functionscore.RandomScoreFunctionBuilder; import org.elasticsearch.search.SearchModule; -import org.elasticsearch.test.rest.ESRestTestCase; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -71,7 +68,7 @@ * The queries to test are specified in json format, which turns out to work because we rarely break the * json format here. If the json format of a query being tested here changes then feel free to change this.
*/ -public class QueryBuilderBWCIT extends ESRestTestCase { +public class QueryBuilderBWCIT extends AbstractFullClusterRestartTestCase { private static final List CANDIDATES = new ArrayList<>(); @@ -145,32 +142,9 @@ private static void addCandidate(String querySource, QueryBuilder expectedQb) { CANDIDATES.add(new Object[]{"{\"query\": {" + querySource + "}}", expectedQb}); } - private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version")); - private final boolean runningAgainstOldCluster = Booleans.parseBoolean(System.getProperty("tests.is_old_cluster")); - - @Override - protected boolean preserveIndicesUponCompletion() { - return true; - } - - @Override - protected boolean preserveSnapshotsUponCompletion() { - return true; - } - - @Override - protected boolean preserveReposUponCompletion() { - return true; - } - - @Override - protected boolean preserveTemplatesUponCompletion() { - return true; - } - public void testQueryBuilderBWC() throws Exception { String index = "queries"; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); mappingsAndSettings.startObject(); { @@ -230,7 +204,7 @@ public void testQueryBuilderBWC() throws Exception { byte[] qbSource = Base64.getDecoder().decode(queryBuilderStr); try (InputStream in = new ByteArrayInputStream(qbSource, 0, qbSource.length)) { try (StreamInput input = new NamedWriteableAwareStreamInput(new InputStreamStreamInput(in), registry)) { - input.setVersion(oldClusterVersion); + input.setVersion(getOldClusterVersion()); QueryBuilder queryBuilder = input.readNamedWriteable(QueryBuilder.class); assert in.read() == -1; assertEquals(expectedQueryBuilder, queryBuilder); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml index 59692873cc456..2725580d9e8c1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml @@ -139,12 +139,26 @@ setup: features: warnings - do: warnings: - - 'Doc-value field [count] is not using a format. The output will change in 7.0 when doc value fields get formatted based on mappings by default. It is recommended to pass [format=use_field_mapping] with the doc value field in order to opt in for the future behaviour and ease the migration to 7.0.' + - 'There are doc-value fields which are not using a format. The output will change in 7.0 when doc value fields get formatted based on mappings by default. It is recommended to pass [format=use_field_mapping] with a doc value field in order to opt in for the future behaviour and ease the migration to 7.0: [count]' search: body: docvalue_fields: [ "count" ] - match: { hits.hits.0.fields.count: [1] } +--- +"multiple docvalue_fields": + - skip: + version: " - 6.3.99" + reason: format option was added in 6.4 + features: warnings + - do: + warnings: + - 'There are doc-value fields which are not using a format. The output will change in 7.0 when doc value fields get formatted based on mappings by default. 
It is recommended to pass [format=use_field_mapping] with a doc value field in order to opt in for the future behaviour and ease the migration to 7.0: [count, include.field1.keyword]' + search: + body: + docvalue_fields: [ "count", "include.field1.keyword" ] + - match: { hits.hits.0.fields.count: [1] } + --- "docvalue_fields as url param": - skip: @@ -153,7 +167,7 @@ setup: features: warnings - do: warnings: - - 'Doc-value field [count] is not using a format. The output will change in 7.0 when doc value fields get formatted based on mappings by default. It is recommended to pass [format=use_field_mapping] with the doc value field in order to opt in for the future behaviour and ease the migration to 7.0.' + - 'There are doc-value fields which are not using a format. The output will change in 7.0 when doc value fields get formatted based on mappings by default. It is recommended to pass [format=use_field_mapping] with a doc value field in order to opt in for the future behaviour and ease the migration to 7.0: [count]' search: docvalue_fields: [ "count" ] - match: { hits.hits.0.fields.count: [1] } diff --git a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-4d78db26be.jar.sha1 deleted file mode 100644 index 3a02e483d6808..0000000000000 --- a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-4d78db26be.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fa8e0fbef3e3fcf49ace4a4153580070def770eb \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 0000000000000..72f7319e6af4a --- /dev/null +++ b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +a22f1c6749ca4a3fbc9b330161a8ea3301cac8de \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-4d78db26be.jar.sha1 deleted file mode 100644 index 8279b81d6cfc0..0000000000000 --- a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-4d78db26be.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3d636541581e338a1be7e3e176aac73d7ae0b323 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 0000000000000..f4bf99b4a03a5 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +41ce415b93d75662cc2e790d09120bc0234d6b1b \ No newline at end of file diff --git a/server/licenses/lucene-core-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-core-8.0.0-snapshot-4d78db26be.jar.sha1 deleted file mode 100644 index 683b585bb2f61..0000000000000 --- a/server/licenses/lucene-core-8.0.0-snapshot-4d78db26be.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -126faacb28d1b8cc1ab81d702973d057892120d1 \ No newline at end of file diff --git a/server/licenses/lucene-core-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-core-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 0000000000000..50a21f5c504a2 --- /dev/null +++ b/server/licenses/lucene-core-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +06c1e4fa838807059d27aaf5405cfdfe7303369c \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.0.0-snapshot-4d78db26be.jar.sha1 
b/server/licenses/lucene-grouping-8.0.0-snapshot-4d78db26be.jar.sha1 deleted file mode 100644 index 483f470b5e015..0000000000000 --- a/server/licenses/lucene-grouping-8.0.0-snapshot-4d78db26be.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -abd514ec02837f48b8c478287fde7cc5d6439ada \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-grouping-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 0000000000000..76bdfa1c6c4bc --- /dev/null +++ b/server/licenses/lucene-grouping-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +5b0a019a938deb58160647e7640b348bb99c10a8 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-highlighter-8.0.0-snapshot-4d78db26be.jar.sha1 deleted file mode 100644 index 27dd042c06bf3..0000000000000 --- a/server/licenses/lucene-highlighter-8.0.0-snapshot-4d78db26be.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -778e87a263184b8ddcbb4ef9d244467933f32993 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-highlighter-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 0000000000000..017225c0e467d --- /dev/null +++ b/server/licenses/lucene-highlighter-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +4d813f3ba0ddd56bac728edb88ed8875e6acfd18 \ No newline at end of file diff --git a/server/licenses/lucene-join-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-join-8.0.0-snapshot-4d78db26be.jar.sha1 deleted file mode 100644 index 13d2db8d210dc..0000000000000 --- a/server/licenses/lucene-join-8.0.0-snapshot-4d78db26be.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -96aff29ad966204c73f8dd98d8116f09e34b6ebd \ No newline at end of file diff --git a/server/licenses/lucene-join-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-join-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 0000000000000..29cdbbfe69f3c --- /dev/null +++ b/server/licenses/lucene-join-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +00c7e20b6a35ebecc875dd52bfb324967c5555d6 \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-memory-8.0.0-snapshot-4d78db26be.jar.sha1 deleted file mode 100644 index 6e014f20c97fd..0000000000000 --- a/server/licenses/lucene-memory-8.0.0-snapshot-4d78db26be.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e72e2accebb1277c57dfe21bc011195eed91dbfd \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-memory-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 0000000000000..49087293afa7c --- /dev/null +++ b/server/licenses/lucene-memory-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +e4dbff54a0befdc7d67c0f39890586c220df718e \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-misc-8.0.0-snapshot-4d78db26be.jar.sha1 deleted file mode 100644 index 57081e7aa10ba..0000000000000 --- a/server/licenses/lucene-misc-8.0.0-snapshot-4d78db26be.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bf25587ebf6823781f5d7acffd7d65c46c21cb27 \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-misc-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 0000000000000..3c12235dff678 --- /dev/null +++ 
b/server/licenses/lucene-misc-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +74d17f6bdf1fa4d499f02904432aa3b1024bde88 \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-queries-8.0.0-snapshot-4d78db26be.jar.sha1 deleted file mode 100644 index 6855364592ea5..0000000000000 --- a/server/licenses/lucene-queries-8.0.0-snapshot-4d78db26be.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6cad42923bcb6e1c6060ae1cbab574646e8c808e \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-queries-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 0000000000000..a423deb397de6 --- /dev/null +++ b/server/licenses/lucene-queries-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +bec78be38f777765146c35f65e247909563d6814 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-queryparser-8.0.0-snapshot-4d78db26be.jar.sha1 deleted file mode 100644 index f9d037120a342..0000000000000 --- a/server/licenses/lucene-queryparser-8.0.0-snapshot-4d78db26be.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5841d7e877e51bbd2d325709353f5ab7e94b49a \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-queryparser-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 0000000000000..79195ed1d5e1c --- /dev/null +++ b/server/licenses/lucene-queryparser-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +74b76f8fed44400bc2a5d938ca2611a97b4d7a7c \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-sandbox-8.0.0-snapshot-4d78db26be.jar.sha1 deleted file mode 100644 index 45c8934a8d41b..0000000000000 --- a/server/licenses/lucene-sandbox-8.0.0-snapshot-4d78db26be.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fefe17f6ac0c7d505c5051e96d0f4916fec2bf9e \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-sandbox-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 0000000000000..d5cd94b7fe5d6 --- /dev/null +++ b/server/licenses/lucene-sandbox-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +2f65fa728b3bc924db6538f4c3caf2fcd25451cf \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-spatial-8.0.0-snapshot-4d78db26be.jar.sha1 deleted file mode 100644 index b02408a7683b3..0000000000000 --- a/server/licenses/lucene-spatial-8.0.0-snapshot-4d78db26be.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -22b0a9d9fb675f7c82a7a2b18f593f3278b40f11 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-spatial-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 0000000000000..76857b72f012b --- /dev/null +++ b/server/licenses/lucene-spatial-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +916a91f0cab2d3684707c59e9adca7b3030b2c66 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-4d78db26be.jar.sha1 deleted file mode 100644 index d4e8b662ce465..0000000000000 --- a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-4d78db26be.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bd6449cc67a36891f6b3201489c5ed44d795fab0 \ No 
newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 0000000000000..7ab84df992bc4 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +eb3e630d6013e41838fb277943ce921f256f1c61 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-spatial3d-8.0.0-snapshot-4d78db26be.jar.sha1 deleted file mode 100644 index 9743868e5c748..0000000000000 --- a/server/licenses/lucene-spatial3d-8.0.0-snapshot-4d78db26be.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5e2a8b3e9e19ad61fcbd27a138cf55f2d6cbfb2d \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-spatial3d-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 0000000000000..d793f4c54d9d1 --- /dev/null +++ b/server/licenses/lucene-spatial3d-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +fa10ff14eab2f579cff2f0fa33c9c7f3b24daf12 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-suggest-8.0.0-snapshot-4d78db26be.jar.sha1 deleted file mode 100644 index 8b722955278cf..0000000000000 --- a/server/licenses/lucene-suggest-8.0.0-snapshot-4d78db26be.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bd5931d1d5ca3f84565534182881565a44aeb72a \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.0.0-snapshot-66c671ea80.jar.sha1 b/server/licenses/lucene-suggest-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 0000000000000..0ea0c2fb573fd --- /dev/null +++ b/server/licenses/lucene-suggest-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +3dd65ca6612b4f98530847b99ab348fd83055fdf \ No newline at end of file diff --git a/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java b/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java index 7f36074d1459b..e28d8990c91e3 100644 --- a/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java +++ b/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java @@ -19,9 +19,9 @@ package org.apache.lucene.search.grouping; import org.apache.lucene.search.FieldDoc; +import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TotalHits; @@ -44,7 +44,7 @@ public final class CollapsingTopDocsCollector extends FirstPassGroupingCollec protected final String collapseField; protected final Sort sort; - protected Scorer scorer; + protected Scorable scorer; private int totalHitCount; @@ -102,7 +102,7 @@ public ScoreMode scoreMode() { } @Override - public void setScorer(Scorer scorer) throws IOException { + public void setScorer(Scorable scorer) throws IOException { super.setScorer(scorer); this.scorer = scorer; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java index 5459805416e91..a9d83cfbce628 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java @@ -39,6 +39,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.threadpool.ThreadPool; @@ -171,6 +172,12 @@ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final Resi throw new IllegalArgumentException("cannot provide index.number_of_routing_shards on resize"); } } + if (IndexSettings.INDEX_SOFT_DELETES_SETTING.exists(metaData.getSettings()) && + IndexSettings.INDEX_SOFT_DELETES_SETTING.get(metaData.getSettings()) && + IndexSettings.INDEX_SOFT_DELETES_SETTING.exists(targetIndexSettings) && + IndexSettings.INDEX_SOFT_DELETES_SETTING.get(targetIndexSettings) == false) { + throw new IllegalArgumentException("Can't disable [index.soft_deletes.enabled] setting on resize"); + } String cause = resizeRequest.getResizeType().name().toLowerCase(Locale.ROOT) + "_index"; targetIndex.cause(cause); Settings.Builder settingsBuilder = Settings.builder().put(targetIndexSettings); diff --git a/server/src/main/java/org/elasticsearch/action/search/MaxScoreCollector.java b/server/src/main/java/org/elasticsearch/action/search/MaxScoreCollector.java index 071cd92330496..2959802f2e3f5 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MaxScoreCollector.java +++ b/server/src/main/java/org/elasticsearch/action/search/MaxScoreCollector.java @@ -19,8 +19,8 @@ package org.elasticsearch.action.search; +import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.Scorer; import org.apache.lucene.search.SimpleCollector; import java.io.IOException; @@ -30,12 +30,12 @@ */ public class MaxScoreCollector extends SimpleCollector { - private Scorer scorer; + private Scorable scorer; private float maxScore = Float.NEGATIVE_INFINITY; private boolean hasHits = false; @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { this.scorer = scorer; } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index e41911d123e5c..9466b03c442a0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -749,7 +749,8 @@ static void prepareResizeIndexSettings( } } else { final Predicate sourceSettingsPredicate = - (s) -> (s.startsWith("index.similarity.") || s.startsWith("index.analysis.") || s.startsWith("index.sort.")) + (s) -> (s.startsWith("index.similarity.") || s.startsWith("index.analysis.") || + s.startsWith("index.sort.") || s.equals("index.soft_deletes.enabled")) && indexSettingsBuilder.keys().contains(s) == false; builder.put(sourceMetaData.getSettings().filter(sourceSettingsPredicate)); } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java index 47453aa8a41db..dc8628f184e43 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ 
b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -745,31 +745,6 @@ public static Version parse(String toParse, Version defaultValue) { } } - /** - * Return a Scorer that throws an ElasticsearchIllegalStateException - * on all operations with the given message. - */ - public static Scorer illegalScorer(final String message) { - return new Scorer(null) { - @Override - public float score() throws IOException { - throw new IllegalStateException(message); - } - @Override - public int docID() { - throw new IllegalStateException(message); - } - @Override - public DocIdSetIterator iterator() { - throw new IllegalStateException(message); - } - @Override - public float getMaxScore(int upTo) throws IOException { - throw new IllegalStateException(message); - } - }; - } - private static final class CommitPoint extends IndexCommit { private String segmentsFileName; private final Collection files; diff --git a/server/src/main/java/org/elasticsearch/common/lucene/MinimumScoreCollector.java b/server/src/main/java/org/elasticsearch/common/lucene/MinimumScoreCollector.java index 76b59887fb946..f99d68952e557 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/MinimumScoreCollector.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/MinimumScoreCollector.java @@ -22,9 +22,9 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Collector; import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreCachingWrappingScorer; import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.Scorer; import org.apache.lucene.search.SimpleCollector; import java.io.IOException; @@ -34,7 +34,7 @@ public class MinimumScoreCollector extends SimpleCollector { private final Collector collector; private final float minimumScore; - private Scorer scorer; + private Scorable scorer; private LeafCollector leafCollector; public MinimumScoreCollector(Collector collector, float minimumScore) { @@ -43,7 +43,7 @@ public MinimumScoreCollector(Collector collector, float minimumScore) { } @Override - public void setScorer(Scorer scorer) throws IOException { + public void setScorer(Scorable scorer) throws IOException { if (!(scorer instanceof ScoreCachingWrappingScorer)) { scorer = new ScoreCachingWrappingScorer(scorer); } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/ScorerAware.java b/server/src/main/java/org/elasticsearch/common/lucene/ScorerAware.java index df17f8d7757b3..13a2a23ec56a6 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/ScorerAware.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/ScorerAware.java @@ -18,10 +18,10 @@ */ package org.elasticsearch.common.lucene; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; public interface ScorerAware { - void setScorer(Scorer scorer); + void setScorer(Scorable scorer); } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/MinScoreScorer.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/MinScoreScorer.java index 5296926e9869d..204f69f1e0af0 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/MinScoreScorer.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/MinScoreScorer.java @@ -19,14 +19,13 @@ package org.elasticsearch.common.lucene.search.function; -import java.io.IOException; - import org.apache.lucene.search.DocIdSetIterator; 
-import org.apache.lucene.search.ScoreCachingWrappingScorer; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.search.Weight; +import java.io.IOException; + /** A {@link Scorer} that filters out documents that have a score that is * lower than a configured constant. */ final class MinScoreScorer extends Scorer { @@ -34,13 +33,10 @@ final class MinScoreScorer extends Scorer { private final Scorer in; private final float minScore; + private float curScore; + MinScoreScorer(Weight weight, Scorer scorer, float minScore) { super(weight); - if (scorer instanceof ScoreCachingWrappingScorer == false) { - // when minScore is set, scores might be requested twice: once - // to verify the match, and once by the collector - scorer = new ScoreCachingWrappingScorer(scorer); - } this.in = scorer; this.minScore = minScore; } @@ -55,8 +51,8 @@ public int docID() { } @Override - public float score() throws IOException { - return in.score(); + public float score() { + return curScore; } @Override @@ -87,7 +83,8 @@ public boolean matches() throws IOException { if (inTwoPhase != null && inTwoPhase.matches() == false) { return false; } - return in.score() >= minScore; + curScore = in.score(); + return curScore >= minScore; } @Override diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java index bf1ea637a9671..5edc1659f54f7 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java @@ -20,9 +20,8 @@ package org.elasticsearch.common.lucene.search.function; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Explanation; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.elasticsearch.script.ExplainableSearchScript; import org.elasticsearch.script.ScoreScript; import org.elasticsearch.script.Script; @@ -32,33 +31,19 @@ public class ScriptScoreFunction extends ScoreFunction { - static final class CannedScorer extends Scorer { + static final class CannedScorer extends Scorable { protected int docid; protected float score; - CannedScorer() { - super(null); - } - @Override public int docID() { return docid; } @Override - public float score() throws IOException { + public float score() { return score; } - - @Override - public DocIdSetIterator iterator() { - throw new UnsupportedOperationException(); - } - - @Override - public float getMaxScore(int upTo) throws IOException { - throw new UnsupportedOperationException(); - } } private final Script sScript; diff --git a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index e25d954aa4f1c..e87b3757e6b28 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.regex.Regex; -import java.util.AbstractMap; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -54,7 +53,7 @@ public abstract class AbstractScopedSettings 
extends AbstractComponent { private final List> settingUpdaters = new CopyOnWriteArrayList<>(); private final Map> complexMatchers; private final Map> keySettings; - private final Map, Function, Map.Entry>> settingUpgraders; + private final Map, SettingUpgrader> settingUpgraders; private final Setting.Property scope; private static final Pattern KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])*[-\\w]+$"); private static final Pattern GROUP_KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])+$"); @@ -70,12 +69,8 @@ protected AbstractScopedSettings( this.settingUpgraders = Collections.unmodifiableMap( - settingUpgraders - .stream() - .collect( - Collectors.toMap( - SettingUpgrader::getSetting, - u -> e -> new AbstractMap.SimpleEntry<>(u.getKey(e.getKey()), u.getValue(e.getValue()))))); + settingUpgraders.stream().collect(Collectors.toMap(SettingUpgrader::getSetting, Function.identity()))); + this.scope = scope; Map> complexMatchers = new HashMap<>(); @@ -786,15 +781,25 @@ public Settings upgradeSettings(final Settings settings) { boolean changed = false; // track if any settings were upgraded for (final String key : settings.keySet()) { final Setting setting = getRaw(key); - final Function, Map.Entry> upgrader = settingUpgraders.get(setting); + final SettingUpgrader upgrader = settingUpgraders.get(setting); if (upgrader == null) { // the setting does not have an upgrader, copy the setting builder.copy(key, settings); } else { // the setting has an upgrader, so mark that we have changed a setting and apply the upgrade logic changed = true; - final Map.Entry upgrade = upgrader.apply(new Entry(key, settings)); - builder.put(upgrade.getKey(), upgrade.getValue()); + // noinspection ConstantConditions + if (setting.getConcreteSetting(key).isListSetting()) { + final List value = settings.getAsList(key); + final String upgradedKey = upgrader.getKey(key); + final List upgradedValue = upgrader.getListValue(value); + builder.putList(upgradedKey, upgradedValue); + } else { + final String value = settings.get(key); + final String upgradedKey = upgrader.getKey(key); + final String upgradedValue = upgrader.getValue(value); + builder.put(upgradedKey, upgradedValue); + } } } // we only return a new instance if there was an upgrade diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index cb369d6cfda02..7e90aa3f44206 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -443,6 +443,9 @@ public void apply(Settings value, Settings current, Settings previous) { EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING ))); - public static List> BUILT_IN_SETTING_UPGRADERS = Collections.emptyList(); + public static List> BUILT_IN_SETTING_UPGRADERS = Collections.unmodifiableList(Arrays.asList( + RemoteClusterAware.SEARCH_REMOTE_CLUSTER_SEEDS_UPGRADER, + RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_PROXY_UPGRADER, + RemoteClusterService.SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE_UPGRADER)); } diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java index 89bbe752a1ffc..5244cdd726d05 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -345,6 +345,11 @@ boolean isGroupSetting() { return false; } + + 
final boolean isListSetting() { + return this instanceof ListSetting; + } + boolean hasComplexMatcher() { return isGroupSetting(); } @@ -453,7 +458,7 @@ public final String getRaw(final Settings settings) { * @return the raw string representation of the setting value */ String innerGetRaw(final Settings settings) { - return settings.get(getKey(), defaultValue.apply(settings)); + return settings.get(getKey(), defaultValue.apply(settings), isListSetting()); } /** Logs a deprecation warning if the setting is deprecated and used. */ @@ -1305,7 +1310,6 @@ public void diff(Settings.Builder builder, Settings source, Settings defaultSett } } } - } static void logSettingUpdate(Setting setting, Settings current, Settings previous, Logger logger) { diff --git a/server/src/main/java/org/elasticsearch/common/settings/SettingUpgrader.java b/server/src/main/java/org/elasticsearch/common/settings/SettingUpgrader.java index 91f2bead300d3..bc41b55490574 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/SettingUpgrader.java +++ b/server/src/main/java/org/elasticsearch/common/settings/SettingUpgrader.java @@ -19,6 +19,8 @@ package org.elasticsearch.common.settings; +import java.util.List; + /** * Represents the logic to upgrade a setting. * @@ -51,4 +53,8 @@ default String getValue(final String value) { return value; } + default List<String> getListValue(final List<String> value) { + return value; + } + } diff --git a/server/src/main/java/org/elasticsearch/common/settings/Settings.java b/server/src/main/java/org/elasticsearch/common/settings/Settings.java index 2eb14f7ac6592..1aeed2aee5115 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -245,6 +245,30 @@ public String get(String setting, String defaultValue) { return retVal == null ? defaultValue : retVal; } + /** + * Returns the setting value associated with the setting key. If it does not exist, + * returns the default value provided. + */ + String get(String setting, String defaultValue, boolean isList) { + Object value = settings.get(setting); + if (value != null) { + if (value instanceof List) { + if (isList == false) { + throw new IllegalArgumentException( + "Found list type value for setting [" + setting + "] but did not expect a list for it." + ); + } + } else if (isList) { + throw new IllegalArgumentException( + "Expected list type value for setting [" + setting + "] but found [" + value.getClass() + ']' + ); + } + return toString(value); + } else { + return defaultValue; + } + } + /** * Returns the setting value (as float) associated with the setting key. If it does not exist, * returns the default value provided.
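For illustration (a sketch, not part of the patch): the new three-argument Settings#get, reached through Setting#innerGetRaw with isListSetting(), makes a scalar setting fail fast when the stored value is a list, instead of silently coercing it. The setting key "example.key" is made up here:

    Setting<String> scalar = Setting.simpleString("example.key", Setting.Property.NodeScope); // hypothetical key
    Settings stored = Settings.builder().putList("example.key", "a", "b").build();
    // scalar.get(stored) now throws
    // IllegalArgumentException: Found list type value for setting [example.key] but did not expect a list for it.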
diff --git a/server/src/main/java/org/elasticsearch/http/HttpInfo.java b/server/src/main/java/org/elasticsearch/http/HttpInfo.java index 4e944a0f7fac8..aece813199479 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpInfo.java +++ b/server/src/main/java/org/elasticsearch/http/HttpInfo.java @@ -19,35 +19,52 @@ package org.elasticsearch.http; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; +import static org.elasticsearch.common.Booleans.parseBoolean; + public class HttpInfo implements Writeable, ToXContentFragment { + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(HttpInfo.class)); + + /** Whether to add hostname to publish host field when serializing. */ + private static final boolean CNAME_IN_PUBLISH_HOST = + parseBoolean(System.getProperty("es.http.cname_in_publish_address"), false); + private final BoundTransportAddress address; private final long maxContentLength; + private final boolean cnameInPublishHost; public HttpInfo(StreamInput in) throws IOException { - address = BoundTransportAddress.readBoundTransportAddress(in); - maxContentLength = in.readLong(); + this(BoundTransportAddress.readBoundTransportAddress(in), in.readLong(), CNAME_IN_PUBLISH_HOST); } - @Override - public void writeTo(StreamOutput out) throws IOException { - address.writeTo(out); - out.writeLong(maxContentLength); + public HttpInfo(BoundTransportAddress address, long maxContentLength) { + this(address, maxContentLength, CNAME_IN_PUBLISH_HOST); } - public HttpInfo(BoundTransportAddress address, long maxContentLength) { + HttpInfo(BoundTransportAddress address, long maxContentLength, boolean cnameInPublishHost) { this.address = address; this.maxContentLength = maxContentLength; + this.cnameInPublishHost = cnameInPublishHost; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + address.writeTo(out); + out.writeLong(maxContentLength); } static final class Fields { @@ -62,7 +79,21 @@ static final class Fields { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.HTTP); builder.array(Fields.BOUND_ADDRESS, (Object[]) address.boundAddresses()); - builder.field(Fields.PUBLISH_ADDRESS, address.publishAddress().toString()); + TransportAddress publishAddress = address.publishAddress(); + String publishAddressString = publishAddress.toString(); + String hostString = publishAddress.address().getHostString(); + if (InetAddresses.isInetAddress(hostString) == false) { + if (cnameInPublishHost) { + publishAddressString = hostString + '/' + publishAddress.toString(); + } else { + DEPRECATION_LOGGER.deprecated( + "[http.publish_host] was printed as [ip:port] instead of [hostname/ip:port]. " + + "This format is deprecated and will change to [hostname/ip:port] in a future version. " + + "Use -Des.http.cname_in_publish_address=true to enforce non-deprecated formatting." 
+ ); + } + } + builder.field(Fields.PUBLISH_ADDRESS, publishAddressString); builder.humanReadableField(Fields.MAX_CONTENT_LENGTH_IN_BYTES, Fields.MAX_CONTENT_LENGTH, maxContentLength()); builder.endObject(); return builder; diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index fe27aea805eef..5ebe13577f4c2 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -661,7 +661,7 @@ protected final void ensureOpen() { } /** get commits stats for the last commit */ - public CommitStats commitStats() { + public final CommitStats commitStats() { return new CommitStats(getLastCommittedSegmentInfos()); } @@ -678,12 +678,6 @@ public CommitStats commitStats() { */ public abstract void waitForOpsToComplete(long seqNo) throws InterruptedException; - /** - * Reset the local checkpoint in the tracker to the given local checkpoint - * @param localCheckpoint the new checkpoint to be set - */ - public abstract void resetLocalCheckpoint(long localCheckpoint); - /** * @return a {@link SeqNoStats} object, using local state and the supplied global checkpoint */ @@ -951,7 +945,9 @@ public final boolean refreshNeeded() { * * @return the commit Id for the resulting commit */ - public abstract CommitId flush() throws EngineException; + public final CommitId flush() throws EngineException { + return flush(false, false); + } /** @@ -1163,11 +1159,16 @@ public enum Origin { PRIMARY, REPLICA, PEER_RECOVERY, - LOCAL_TRANSLOG_RECOVERY; + LOCAL_TRANSLOG_RECOVERY, + LOCAL_RESET; public boolean isRecovery() { return this == PEER_RECOVERY || this == LOCAL_TRANSLOG_RECOVERY; } + + boolean isFromTranslog() { + return this == LOCAL_TRANSLOG_RECOVERY || this == LOCAL_RESET; + } } public Origin origin() { diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index d9b03777f1b1b..52dd4d3fcd09e 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -152,12 +152,6 @@ public class InternalEngine extends Engine { private final SoftDeletesPolicy softDeletesPolicy; private final LastRefreshedCheckpointListener lastRefreshedCheckpointListener; - /** - * How many bytes we are currently moving to disk, via either IndexWriter.flush or refresh. IndexingMemoryController polls this - * across all shards to decide if throttling is necessary because moving bytes to disk is falling behind vs incoming documents - * being indexed/deleted. 
- */ - private final AtomicLong writingBytes = new AtomicLong(); private final AtomicBoolean trackTranslogLocation = new AtomicBoolean(false); @Nullable @@ -530,7 +524,7 @@ public String getHistoryUUID() { /** Returns how many bytes we are currently moving from indexing buffer to segments on disk */ @Override public long getWritingBytes() { - return writingBytes.get(); + return indexWriter.getFlushingBytes() + versionMap.getRefreshingBytes(); } /** @@ -735,6 +729,7 @@ private boolean canOptimizeAddDocument(Index index) { : "version: " + index.version() + " type: " + index.versionType(); return true; case LOCAL_TRANSLOG_RECOVERY: + case LOCAL_RESET: assert index.isRetry(); return true; // allow to optimize in order to update the max safe time stamp default: @@ -833,7 +828,7 @@ public IndexResult index(Index index) throws IOException { indexResult = new IndexResult( plan.versionForIndexing, getPrimaryTerm(), plan.seqNoForIndexing, plan.currentNotFoundOrDeleted); } - if (index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { + if (index.origin().isFromTranslog() == false) { final Translog.Location location; if (indexResult.getResultType() == Result.Type.SUCCESS) { location = translog.add(new Translog.Index(index, indexResult)); @@ -1173,7 +1168,7 @@ public DeleteResult delete(Delete delete) throws IOException { deleteResult = new DeleteResult( plan.versionOfDeletion, getPrimaryTerm(), plan.seqNoOfDeletion, plan.currentlyDeleted == false); } - if (delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { + if (delete.origin().isFromTranslog() == false) { final Translog.Location location; if (deleteResult.getResultType() == Result.Type.SUCCESS) { location = translog.add(new Translog.Delete(delete, deleteResult)); @@ -1411,7 +1406,7 @@ private NoOpResult innerNoOp(final NoOp noOp) throws IOException { } } final NoOpResult noOpResult = failure != null ? new NoOpResult(getPrimaryTerm(), noOp.seqNo(), failure) : new NoOpResult(getPrimaryTerm(), noOp.seqNo()); - if (noOp.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { + if (noOp.origin().isFromTranslog() == false) { final Translog.Location location = translog.add(new Translog.NoOp(noOp.seqNo(), noOp.primaryTerm(), noOp.reason())); noOpResult.setTranslogLocation(location); } @@ -1437,9 +1432,6 @@ final void refresh(String source, SearcherScope scope) throws EngineException { // pass the new reader reference to the external reader manager. final long localCheckpointBeforeRefresh = getLocalCheckpoint(); - // this will also cause version map ram to be freed hence we always account for it. 
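Reviewer note: the deleted writingBytes counter above had to be incremented before each refresh and decremented in a finally block; getWritingBytes() now derives the value on demand. A hedged sketch of the new shape, using hypothetical interfaces in place of IndexWriter and LiveVersionMap:

    // Hypothetical stand-ins for the two sources whose in-flight bytes are summed.
    interface FlushingBytesSource { long getFlushingBytes(); }     // cf. IndexWriter
    interface RefreshingBytesSource { long getRefreshingBytes(); } // cf. LiveVersionMap

    final class WritingBytesSketch {
        private final FlushingBytesSource writer;
        private final RefreshingBytesSource versionMap;

        WritingBytesSketch(FlushingBytesSource writer, RefreshingBytesSource versionMap) {
            this.writer = writer;
            this.versionMap = versionMap;
        }

        // Derived on demand: no AtomicLong to keep in sync around refresh/flush,
        // so the value cannot drift if a refresh fails part-way.
        long getWritingBytes() {
            return writer.getFlushingBytes() + versionMap.getRefreshingBytes();
        }
    }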
- final long bytes = indexWriter.ramBytesUsed() + versionMap.ramBytesUsedForRefresh(); - writingBytes.addAndGet(bytes); try (ReleasableLock lock = readLock.acquire()) { ensureOpen(); if (store.tryIncRef()) { @@ -1465,8 +1457,6 @@ final void refresh(String source, SearcherScope scope) throws EngineException { e.addSuppressed(inner); } throw new RefreshFailedEngineException(shardId, e); - } finally { - writingBytes.addAndGet(-bytes); } assert lastRefreshedCheckpoint() >= localCheckpointBeforeRefresh : "refresh checkpoint was not advanced; " + "local_checkpoint=" + localCheckpointBeforeRefresh + " refresh_checkpoint=" + lastRefreshedCheckpoint(); @@ -1576,11 +1566,6 @@ public boolean shouldPeriodicallyFlush() { || localCheckpointTracker.getCheckpoint() == localCheckpointTracker.getMaxSeqNo(); } - @Override - public CommitId flush() throws EngineException { - return flush(false, false); - } - @Override public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineException { ensureOpen(); @@ -2340,11 +2325,6 @@ public void waitForOpsToComplete(long seqNo) throws InterruptedException { localCheckpointTracker.waitForOpsToComplete(seqNo); } - @Override - public void resetLocalCheckpoint(long localCheckpoint) { - localCheckpointTracker.resetCheckpoint(localCheckpoint); - } - @Override public SeqNoStats getSeqNoStats(long globalCheckpoint) { return localCheckpointTracker.getStats(globalCheckpoint); diff --git a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java index 18d3cedb37e60..d0dd9466b6075 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java @@ -434,6 +434,14 @@ long ramBytesUsedForRefresh() { return maps.current.ramBytesUsed.get(); } + /** + * Returns how much RAM is currently being freed up by refreshing. This is {@link #ramBytesUsed()} + * except it does not include tombstones because they don't clear on refresh. + */ + long getRefreshingBytes() { + return maps.old.ramBytesUsed.get(); + } + @Override public Collection getChildResources() { // TODO: useful to break down RAM usage here? diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java new file mode 100644 index 0000000000000..b958bd84b76a6 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -0,0 +1,368 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.index.engine; + +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.index.SoftDeletesDirectoryReaderWrapper; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.ReferenceManager; +import org.apache.lucene.search.SearcherManager; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.Lock; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.seqno.SeqNoStats; +import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.TranslogStats; + +import java.io.Closeable; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.stream.Stream; + +/** + * A basic read-only engine that allows switching a shard to be true read-only temporarily or permanently. + * Note: this engine can be opened side-by-side with a read-write engine but will not reflect any changes made to the read-write + * engine. + * + * @see #ReadOnlyEngine(EngineConfig, SeqNoStats, TranslogStats, boolean, Function) + */ +public final class ReadOnlyEngine extends Engine { + + private final SegmentInfos lastCommittedSegmentInfos; + private final SeqNoStats seqNoStats; + private final TranslogStats translogStats; + private final SearcherManager searcherManager; + private final IndexCommit indexCommit; + private final Lock indexWriterLock; + + /** + * Creates a new ReadOnlyEngine. This ctor can also be used to open a read-only engine on top of an already opened + * read-write engine. It allows to optionally obtain the writer locks for the shard which would time-out if another + * engine is still open. + * + * @param config the engine configuration + * @param seqNoStats sequence number statistics for this engine or null if not provided + * @param translogStats translog stats for this engine or null if not provided + * @param obtainLock if true this engine will try to obtain the {@link IndexWriter#WRITE_LOCK_NAME} lock. Otherwise + * the lock won't be obtained + * @param readerWrapperFunction allows to wrap the index-reader for this engine. + */ + public ReadOnlyEngine(EngineConfig config, SeqNoStats seqNoStats, TranslogStats translogStats, boolean obtainLock, + Function readerWrapperFunction) { + super(config); + try { + Store store = config.getStore(); + store.incRef(); + DirectoryReader reader = null; + Directory directory = store.directory(); + Lock indexWriterLock = null; + boolean success = false; + try { + // we obtain the IW lock even though we never modify the index. + // yet this makes sure nobody else does. including some testing tools that try to be messy + indexWriterLock = obtainLock ? directory.obtainLock(IndexWriter.WRITE_LOCK_NAME) : null; + this.lastCommittedSegmentInfos = Lucene.readSegmentInfos(directory); + this.translogStats = translogStats == null ? new TranslogStats(0, 0, 0, 0, 0) : translogStats; + this.seqNoStats = seqNoStats == null ? 
buildSeqNoStats(lastCommittedSegmentInfos) : seqNoStats; + reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(directory), config.getShardId()); + if (config.getIndexSettings().isSoftDeleteEnabled()) { + reader = new SoftDeletesDirectoryReaderWrapper(reader, Lucene.SOFT_DELETES_FIELD); + } + reader = readerWrapperFunction.apply(reader); + this.indexCommit = reader.getIndexCommit(); + this.searcherManager = new SearcherManager(reader, + new RamAccountingSearcherFactory(engineConfig.getCircuitBreakerService())); + this.indexWriterLock = indexWriterLock; + success = true; + } finally { + if (success == false) { + IOUtils.close(reader, indexWriterLock, store::decRef); + } + } + } catch (IOException e) { + throw new UncheckedIOException(e); // this is stupid + } + } + + @Override + protected void closeNoLock(String reason, CountDownLatch closedLatch) { + if (isClosed.compareAndSet(false, true)) { + try { + IOUtils.close(searcherManager, indexWriterLock, store::decRef); + } catch (Exception ex) { + logger.warn("failed to close searcher", ex); + } finally { + closedLatch.countDown(); + } + } + } + + public static SeqNoStats buildSeqNoStats(SegmentInfos infos) { + final SequenceNumbers.CommitInfo seqNoStats = + SequenceNumbers.loadSeqNoInfoFromLuceneCommit(infos.userData.entrySet()); + long maxSeqNo = seqNoStats.maxSeqNo; + long localCheckpoint = seqNoStats.localCheckpoint; + return new SeqNoStats(maxSeqNo, localCheckpoint, localCheckpoint); + } + + @Override + public GetResult get(Get get, BiFunction searcherFactory) throws EngineException { + return getFromSearcher(get, searcherFactory, SearcherScope.EXTERNAL); + } + + @Override + protected ReferenceManager getReferenceManager(SearcherScope scope) { + return searcherManager; + } + + @Override + protected SegmentInfos getLastCommittedSegmentInfos() { + return lastCommittedSegmentInfos; + } + + @Override + public String getHistoryUUID() { + return lastCommittedSegmentInfos.userData.get(Engine.HISTORY_UUID_KEY); + } + + @Override + public long getWritingBytes() { + return 0; + } + + @Override + public long getIndexThrottleTimeInMillis() { + return 0; + } + + @Override + public boolean isThrottled() { + return false; + } + + @Override + public IndexResult index(Index index) { + assert false : "this should not be called"; + throw new UnsupportedOperationException("indexing is not supported on a read-only engine"); + } + + @Override + public DeleteResult delete(Delete delete) { + assert false : "this should not be called"; + throw new UnsupportedOperationException("deletes are not supported on a read-only engine"); + } + + @Override + public NoOpResult noOp(NoOp noOp) { + assert false : "this should not be called"; + throw new UnsupportedOperationException("no-ops are not supported on a read-only engine"); + } + + @Override + public boolean isTranslogSyncNeeded() { + return false; + } + + @Override + public boolean ensureTranslogSynced(Stream locations) { + return false; + } + + @Override + public void syncTranslog() { + } + + @Override + public Closeable acquireRetentionLockForPeerRecovery() { + return () -> {}; + } + + @Override + public Translog.Snapshot newChangesSnapshot(String source, MapperService mapperService, long fromSeqNo, long toSeqNo, + boolean requiredFullRange) throws IOException { + return readHistoryOperations(source, mapperService, fromSeqNo); + } + + @Override + public Translog.Snapshot readHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException { + return new 
Translog.Snapshot() { + @Override + public void close() { } + @Override + public int totalOperations() { + return 0; + } + @Override + public Translog.Operation next() { + return null; + } + }; + } + + @Override + public int estimateNumberOfHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException { + return 0; + } + + @Override + public boolean hasCompleteOperationHistory(String source, MapperService mapperService, long startingSeqNo) throws IOException { + return false; + } + + @Override + public TranslogStats getTranslogStats() { + return translogStats; + } + + @Override + public Translog.Location getTranslogLastWriteLocation() { + return new Translog.Location(0,0,0); + } + + @Override + public long getLocalCheckpoint() { + return seqNoStats.getLocalCheckpoint(); + } + + @Override + public void waitForOpsToComplete(long seqNo) { + } + + @Override + public SeqNoStats getSeqNoStats(long globalCheckpoint) { + return new SeqNoStats(seqNoStats.getMaxSeqNo(), seqNoStats.getLocalCheckpoint(), globalCheckpoint); + } + + @Override + public long getLastSyncedGlobalCheckpoint() { + return seqNoStats.getGlobalCheckpoint(); + } + + @Override + public long getIndexBufferRAMBytesUsed() { + return 0; + } + + @Override + public List segments(boolean verbose) { + return Arrays.asList(getSegmentInfo(lastCommittedSegmentInfos, verbose)); + } + + @Override + public void refresh(String source) { + // we could allow refreshes if we want down the road the searcher manager will then reflect changes to a rw-engine + // opened side-by-side + } + + @Override + public void writeIndexingBuffer() throws EngineException { + } + + @Override + public boolean shouldPeriodicallyFlush() { + return false; + } + + @Override + public SyncedFlushResult syncFlush(String syncId, CommitId expectedCommitId) { + // we can't do synced flushes this would require an indexWriter which we don't have + throw new UnsupportedOperationException("syncedFlush is not supported on a read-only engine"); + } + + @Override + public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineException { + return new CommitId(lastCommittedSegmentInfos.getId()); + } + + @Override + public void forceMerge(boolean flush, int maxNumSegments, boolean onlyExpungeDeletes, + boolean upgrade, boolean upgradeOnlyAncientSegments) { + } + + @Override + public IndexCommitRef acquireLastIndexCommit(boolean flushFirst) { + store.incRef(); + return new IndexCommitRef(indexCommit, store::decRef); + } + + @Override + public IndexCommitRef acquireSafeIndexCommit() { + return acquireLastIndexCommit(false); + } + + @Override + public void activateThrottling() { + } + + @Override + public void deactivateThrottling() { + } + + @Override + public void trimUnreferencedTranslogFiles() { + } + + @Override + public boolean shouldRollTranslogGeneration() { + return false; + } + + @Override + public void rollTranslogGeneration() { + } + + @Override + public void restoreLocalCheckpointFromTranslog() { + } + + @Override + public int fillSeqNoGaps(long primaryTerm) { + return 0; + } + + @Override + public Engine recoverFromTranslog(TranslogRecoveryRunner translogRecoveryRunner, long recoverUpToSeqNo) { + return this; + } + + @Override + public void skipTranslogRecovery() { + } + + @Override + public void trimOperationsFromTranslog(long belowTerm, long aboveSeqNo) { + } + + @Override + public void maybePruneDeletes() { + } +} diff --git 
a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java index eaa16e9f07db8..8e0a31859a132 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java @@ -25,7 +25,7 @@ import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparator; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.apache.lucene.search.SortField; import org.apache.lucene.util.BitSet; import org.apache.lucene.util.BytesRef; @@ -71,7 +71,7 @@ protected SortedBinaryDocValues getValues(LeafReaderContext context) throws IOEx return indexFieldData.load(context).getBytesValues(); } - protected void setScorer(Scorer scorer) {} + protected void setScorer(Scorable scorer) {} @Override public FieldComparator newComparator(String fieldname, int numHits, int sortPos, boolean reversed) { @@ -101,7 +101,7 @@ protected SortedDocValues getSortedDocValues(LeafReaderContext context, String f } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { BytesRefFieldComparatorSource.this.setScorer(scorer); } @@ -125,7 +125,7 @@ protected BinaryDocValues getBinaryDocValues(LeafReaderContext context, String f } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { BytesRefFieldComparatorSource.this.setScorer(scorer); } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java index 43bc19a12a384..1ae3fb692ec61 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java @@ -23,7 +23,7 @@ import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparator; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.apache.lucene.search.SortField; import org.apache.lucene.util.BitSet; import org.elasticsearch.common.Nullable; @@ -57,7 +57,7 @@ protected SortedNumericDoubleValues getValues(LeafReaderContext context) throws return indexFieldData.load(context).getDoubleValues(); } - protected void setScorer(Scorer scorer) {} + protected void setScorer(Scorable scorer) {} @Override public FieldComparator newComparator(String fieldname, int numHits, int sortPos, boolean reversed) { @@ -81,7 +81,7 @@ protected NumericDocValues getNumericDocValues(LeafReaderContext context, String return selectedValues.getRawDoubleValues(); } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { DoubleValuesComparatorSource.this.setScorer(scorer); } }; diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionBuilder.java b/server/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionBuilder.java index c64f1b1e403c8..6cfe7d177da79 100644 --- 
a/server/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionBuilder.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.search.function.ScoreFunction; import org.elasticsearch.common.lucene.search.function.WeightFactorFunction; -import org.elasticsearch.common.xcontent.ToXContent.Params; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryShardContext; @@ -46,7 +45,7 @@ public ScoreFunctionBuilder() { * Read from a stream. */ public ScoreFunctionBuilder(StreamInput in) throws IOException { - weight = in.readOptionalFloat(); + weight = checkWeight(in.readOptionalFloat()); } @Override @@ -70,10 +69,17 @@ public final void writeTo(StreamOutput out) throws IOException { */ @SuppressWarnings("unchecked") public final FB setWeight(float weight) { - this.weight = weight; + this.weight = checkWeight(weight); return (FB) this; } + private Float checkWeight(Float weight) { + if (weight != null && Float.compare(weight, 0) < 0) { + throw new IllegalArgumentException("[weight] cannot be negative for a filtering function"); + } + return weight; + } + /** * The weight applied to the function before combining. */ diff --git a/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java index cd33c1bf046ed..9fad96940b87b 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/LocalCheckpointTracker.java @@ -109,6 +109,7 @@ public synchronized void markSeqNoAsCompleted(final long seqNo) { * @param checkpoint the local checkpoint to reset this tracker to */ public synchronized void resetCheckpoint(final long checkpoint) { + // TODO: remove this method once we restore the local history on promotion.
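Reviewer note: the checkWeight guard above changes observable behaviour, since a negative weight now fails fast both in setWeight and when deserializing from a stream. A small usage sketch (WeightSketch is a stand-in, not the real builder):

    final class WeightSketch {
        private Float weight;

        WeightSketch setWeight(float weight) {
            this.weight = checkWeight(weight);
            return this;
        }

        private static Float checkWeight(Float weight) {
            // Same check as above: null remains allowed, negatives are rejected.
            if (weight != null && Float.compare(weight, 0) < 0) {
                throw new IllegalArgumentException("[weight] cannot be negative for a filtering function");
            }
            return weight;
        }

        public static void main(String[] args) {
            new WeightSketch().setWeight(0.5f);      // fine
            try {
                new WeightSketch().setWeight(-1.0f); // now throws
            } catch (IllegalArgumentException e) {
                System.out.println("rejected: " + e.getMessage());
            }
        }
    }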
assert checkpoint != SequenceNumbers.UNASSIGNED_SEQ_NO; assert checkpoint <= this.checkpoint; processedSeqNo.clear(); diff --git a/server/src/main/java/org/elasticsearch/index/shard/GlobalCheckpointListeners.java b/server/src/main/java/org/elasticsearch/index/shard/GlobalCheckpointListeners.java index e279badec4a04..825a8a8a48354 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/GlobalCheckpointListeners.java +++ b/server/src/main/java/org/elasticsearch/index/shard/GlobalCheckpointListeners.java @@ -97,7 +97,6 @@ synchronized void add(final long currentGlobalCheckpoint, final GlobalCheckpoint if (lastKnownGlobalCheckpoint > currentGlobalCheckpoint) { // notify directly executor.execute(() -> notifyListener(listener, lastKnownGlobalCheckpoint, null)); - return; } else { if (listeners == null) { listeners = new ArrayList<>(); diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index bceb106aeef91..4bb56c8b0d3b5 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -163,7 +163,6 @@ import java.util.stream.StreamSupport; import static org.elasticsearch.index.mapper.SourceToParse.source; -import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; public class IndexShard extends AbstractIndexShardComponent implements IndicesClusterStateService.Shard { @@ -1273,16 +1272,18 @@ public Engine.Result applyTranslogOperation(Translog.Operation operation, Engine return result; } - // package-private for testing - int runTranslogRecovery(Engine engine, Translog.Snapshot snapshot) throws IOException { - recoveryState.getTranslog().totalOperations(snapshot.totalOperations()); - recoveryState.getTranslog().totalOperationsOnStart(snapshot.totalOperations()); + /** + * Replays translog operations from the provided translog {@code snapshot} to the current engine using the given {@code origin}. + * The callback {@code onOperationRecovered} is notified after each translog operation is replayed successfully. + */ + int runTranslogRecovery(Engine engine, Translog.Snapshot snapshot, Engine.Operation.Origin origin, + Runnable onOperationRecovered) throws IOException { int opsRecovered = 0; Translog.Operation operation; while ((operation = snapshot.next()) != null) { try { logger.trace("[translog] recover op {}", operation); - Engine.Result result = applyTranslogOperation(operation, Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY); + Engine.Result result = applyTranslogOperation(operation, origin); switch (result.getResultType()) { case FAILURE: throw result.getFailure(); @@ -1295,7 +1296,7 @@ int runTranslogRecovery(Engine engine, Translog.Snapshot snapshot) throws IOExce } opsRecovered++; - recoveryState.getTranslog().incrementRecoveredOperations(); + onOperationRecovered.run(); } catch (Exception e) { if (ExceptionsHelper.status(e) == RestStatus.BAD_REQUEST) { // mainly for MapperParsingException and Failure to detect xcontent @@ -1313,8 +1314,15 @@ int runTranslogRecovery(Engine engine, Translog.Snapshot snapshot) throws IOExce * Operations from the translog will be replayed to bring lucene up to date. 
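Reviewer note: the reworked runTranslogRecovery above takes the origin and a per-operation callback from the caller, so one replay loop serves both full recovery (which feeds recovery stats) and the local reset (which currently reports nothing). A hedged sketch of that shape with stand-in types:

    import java.util.Iterator;

    final class ReplaySketch {
        // Stand-ins for Translog.Snapshot and Engine.Operation.Origin.
        interface Snapshot extends Iterator<String> {}
        enum Origin { LOCAL_TRANSLOG_RECOVERY, LOCAL_RESET }

        // The loop no longer hard-codes stats updates; the caller decides what
        // happens after each successfully replayed operation.
        static int runTranslogRecovery(Snapshot snapshot, Origin origin, Runnable onOperationRecovered) {
            int opsRecovered = 0;
            while (snapshot.hasNext()) {
                String op = snapshot.next(); // applying op with the given origin would go here
                opsRecovered++;
                onOperationRecovered.run();
            }
            return opsRecovered;
        }

        public static void main(String[] args) {
            Iterator<String> ops = java.util.List.of("index{1}", "delete{2}").iterator();
            Snapshot snapshot = new Snapshot() {
                public boolean hasNext() { return ops.hasNext(); }
                public String next() { return ops.next(); }
            };
            int[] recovered = {0};
            int n = runTranslogRecovery(snapshot, Origin.LOCAL_TRANSLOG_RECOVERY, () -> recovered[0]++);
            System.out.println(n + " ops replayed, callback fired " + recovered[0] + " times");
        }
    }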
**/ public void openEngineAndRecoverFromTranslog() throws IOException { + final RecoveryState.Translog translogRecoveryStats = recoveryState.getTranslog(); + final Engine.TranslogRecoveryRunner translogRecoveryRunner = (engine, snapshot) -> { + translogRecoveryStats.totalOperations(snapshot.totalOperations()); + translogRecoveryStats.totalOperationsOnStart(snapshot.totalOperations()); + return runTranslogRecovery(engine, snapshot, Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY, + translogRecoveryStats::incrementRecoveredOperations); + }; innerOpenEngineAndTranslog(); - getEngine().recoverFromTranslog(this::runTranslogRecovery, Long.MAX_VALUE); + getEngine().recoverFromTranslog(translogRecoveryRunner, Long.MAX_VALUE); } /** @@ -1352,11 +1360,7 @@ private void innerOpenEngineAndTranslog() throws IOException { final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY); final long globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID); replicationTracker.updateGlobalCheckpointOnReplica(globalCheckpoint, "read from translog checkpoint"); - - assertMaxUnsafeAutoIdInCommit(); - - final long minRetainedTranslogGen = Translog.readMinTranslogGeneration(translogConfig.getTranslogPath(), translogUUID); - store.trimUnsafeCommits(globalCheckpoint, minRetainedTranslogGen, config.getIndexSettings().getIndexVersionCreated()); + trimUnsafeCommits(); createNewEngine(config); verifyNotClosed(); @@ -1367,6 +1371,15 @@ private void innerOpenEngineAndTranslog() throws IOException { assert recoveryState.getStage() == RecoveryState.Stage.TRANSLOG : "TRANSLOG stage expected but was: " + recoveryState.getStage(); } + private void trimUnsafeCommits() throws IOException { + assert currentEngineReference.get() == null : "engine is running"; + final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY); + final long globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID); + final long minRetainedTranslogGen = Translog.readMinTranslogGeneration(translogConfig.getTranslogPath(), translogUUID); + assertMaxUnsafeAutoIdInCommit(); + store.trimUnsafeCommits(globalCheckpoint, minRetainedTranslogGen, indexSettings.getIndexVersionCreated()); + } + private boolean assertSequenceNumbersInCommit() throws IOException { final Map userData = SegmentInfos.readLatestCommit(store.directory()).getUserData(); assert userData.containsKey(SequenceNumbers.LOCAL_CHECKPOINT_KEY) : "commit point doesn't contains a local checkpoint"; @@ -1463,7 +1476,7 @@ private void ensureWriteAllowed(Engine.Operation.Origin origin) throws IllegalIn if (origin == Engine.Operation.Origin.PRIMARY) { assert assertPrimaryMode(); } else { - assert origin == Engine.Operation.Origin.REPLICA; + assert origin == Engine.Operation.Origin.REPLICA || origin == Engine.Operation.Origin.LOCAL_RESET; assert assertReplicationTarget(); } if (writeAllowedStates.contains(state) == false) { @@ -2166,9 +2179,7 @@ public void onFailedEngine(String reason, @Nullable Exception failure) { private Engine createNewEngine(EngineConfig config) { synchronized (mutex) { - if (state == IndexShardState.CLOSED) { - throw new AlreadyClosedException(shardId + " can't create engine - shard is closed"); - } + verifyNotClosed(); assert this.currentEngineReference.get() == null; Engine engine = newEngine(config); onNewEngine(engine); // call this before we pass the memory barrier otherwise actions that happen @@ 
-2314,19 +2325,14 @@ public void acquireReplicaOperationPermit(final long opPrimaryTerm, final long g bumpPrimaryTerm(opPrimaryTerm, () -> { updateGlobalCheckpointOnReplica(globalCheckpoint, "primary term transition"); final long currentGlobalCheckpoint = getGlobalCheckpoint(); - final long localCheckpoint; - if (currentGlobalCheckpoint == UNASSIGNED_SEQ_NO) { - localCheckpoint = NO_OPS_PERFORMED; + final long maxSeqNo = seqNoStats().getMaxSeqNo(); + logger.info("detected new primary with primary term [{}], global checkpoint [{}], max_seq_no [{}]", + opPrimaryTerm, currentGlobalCheckpoint, maxSeqNo); + if (currentGlobalCheckpoint < maxSeqNo) { + resetEngineToGlobalCheckpoint(); } else { - localCheckpoint = currentGlobalCheckpoint; + getEngine().rollTranslogGeneration(); } - logger.trace( - "detected new primary with primary term [{}], resetting local checkpoint from [{}] to [{}]", - opPrimaryTerm, - getLocalCheckpoint(), - localCheckpoint); - getEngine().resetLocalCheckpoint(localCheckpoint); - getEngine().rollTranslogGeneration(); }); } } @@ -2687,4 +2693,26 @@ public ParsedDocument newNoopTombstoneDoc(String reason) { } }; } + + /** + * Rolls back the current engine to the safe commit, then replays the local translog up to the global checkpoint. + */ + void resetEngineToGlobalCheckpoint() throws IOException { + assert getActiveOperationsCount() == 0 : "Ongoing writes [" + getActiveOperations() + "]"; + sync(); // persist the global checkpoint to disk + final long globalCheckpoint = getGlobalCheckpoint(); + final Engine newEngine; + synchronized (mutex) { + verifyNotClosed(); + IOUtils.close(currentEngineReference.getAndSet(null)); + trimUnsafeCommits(); + newEngine = createNewEngine(newEngineConfig()); + active.set(true); + } + final Engine.TranslogRecoveryRunner translogRunner = (engine, snapshot) -> runTranslogRecovery( + engine, snapshot, Engine.Operation.Origin.LOCAL_RESET, () -> { + // TODO: add dedicated recovery stats for the reset translog + }); + newEngine.recoverFromTranslog(translogRunner, globalCheckpoint); + } } diff --git a/server/src/main/java/org/elasticsearch/script/ScoreAccessor.java b/server/src/main/java/org/elasticsearch/script/ScoreAccessor.java index e8c433347b916..b3cdecb3e0485 100644 --- a/server/src/main/java/org/elasticsearch/script/ScoreAccessor.java +++ b/server/src/main/java/org/elasticsearch/script/ScoreAccessor.java @@ -19,7 +19,7 @@ package org.elasticsearch.script; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.elasticsearch.search.lookup.DocLookup; import java.io.IOException; @@ -32,9 +32,9 @@ */ public final class ScoreAccessor extends Number implements Comparable { - Scorer scorer; + Scorable scorer; - public ScoreAccessor(Scorer scorer) { + public ScoreAccessor(Scorable scorer) { this.scorer = scorer; } diff --git a/server/src/main/java/org/elasticsearch/script/ScoreScript.java b/server/src/main/java/org/elasticsearch/script/ScoreScript.java index d9e56d5573cae..11b135e9a65af 100644 --- a/server/src/main/java/org/elasticsearch/script/ScoreScript.java +++ b/server/src/main/java/org/elasticsearch/script/ScoreScript.java @@ -19,7 +19,7 @@ package org.elasticsearch.script; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.search.lookup.LeafSearchLookup; import org.elasticsearch.search.lookup.SearchLookup; @@ -33,40 +33,40 @@ * A script used for
adjusting the score on a per document basis. */ public abstract class ScoreScript { - + public static final String[] PARAMETERS = new String[]{}; - + /** The generic runtime parameters for the script. */ private final Map params; - + /** A leaf lookup for the bound segment this script will operate on. */ private final LeafSearchLookup leafLookup; - + private DoubleSupplier scoreSupplier = () -> 0.0; - + public ScoreScript(Map params, SearchLookup lookup, LeafReaderContext leafContext) { this.params = params; this.leafLookup = lookup.getLeafSearchLookup(leafContext); } - + public abstract double execute(); - + /** Return the parameters for this script. */ public Map getParams() { return params; } - + /** The doc lookup for the Lucene segment this script was created for. */ public final Map> getDoc() { return leafLookup.doc(); } - + /** Set the current document to run the script on next. */ public void setDocument(int docid) { leafLookup.setDocument(docid); } - - public void setScorer(Scorer scorer) { + + public void setScorer(Scorable scorer) { this.scoreSupplier = () -> { try { return scorer.score(); @@ -75,28 +75,28 @@ public void setScorer(Scorer scorer) { } }; } - + public double get_score() { return scoreSupplier.getAsDouble(); } - + /** A factory to construct {@link ScoreScript} instances. */ public interface LeafFactory { - + /** * Return {@code true} if the script needs {@code _score} calculated, or {@code false} otherwise. */ boolean needs_score(); - + ScoreScript newInstance(LeafReaderContext ctx) throws IOException; } - + /** A factory to construct stateful {@link ScoreScript} factories for a specific index. */ public interface Factory { - + ScoreScript.LeafFactory newFactory(Map params, SearchLookup lookup); - + } - + public static final ScriptContext CONTEXT = new ScriptContext<>("score", ScoreScript.Factory.class); } diff --git a/server/src/main/java/org/elasticsearch/script/ScriptedMetricAggContexts.java b/server/src/main/java/org/elasticsearch/script/ScriptedMetricAggContexts.java index 9f6ea999a9306..e72d597a6afb4 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptedMetricAggContexts.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptedMetricAggContexts.java @@ -20,7 +20,7 @@ package org.elasticsearch.script; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.search.lookup.LeafSearchLookup; @@ -66,7 +66,7 @@ public interface Factory { public abstract static class MapScript extends ParamsAndStateBase { private final LeafSearchLookup leafLookup; - private Scorer scorer; + private Scorable scorer; public MapScript(Map params, Map state, SearchLookup lookup, LeafReaderContext leafContext) { super(params, state); @@ -86,7 +86,7 @@ public void setDocument(int docId) { } } - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { this.scorer = scorer; } diff --git a/server/src/main/java/org/elasticsearch/script/SearchScript.java b/server/src/main/java/org/elasticsearch/script/SearchScript.java index 43ea020aa6e24..fb5f950d61d7e 100644 --- a/server/src/main/java/org/elasticsearch/script/SearchScript.java +++ b/server/src/main/java/org/elasticsearch/script/SearchScript.java @@ -19,7 +19,7 @@ package org.elasticsearch.script; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.Scorer; 
+import org.apache.lucene.search.Scorable; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.lucene.ScorerAware; import org.elasticsearch.search.lookup.LeafDocLookup; @@ -46,22 +46,14 @@ public abstract class SearchScript implements ScorerAware, ExecutableScript { /** The generic runtime parameters for the script. */ private final Map params; - /** A lookup for the index this script will operate on. */ - private final SearchLookup lookup; - - /** A leaf lookup for the bound segment this script will operate on. */ - private final LeafReaderContext leafContext; - /** A leaf lookup for the bound segment this script will operate on. */ private final LeafSearchLookup leafLookup; /** A scorer that will return the score for the current document when the script is run. */ - private Scorer scorer; + private Scorable scorer; public SearchScript(Map params, SearchLookup lookup, LeafReaderContext leafContext) { this.params = params; - this.lookup = lookup; - this.leafContext = leafContext; // TODO: remove leniency when painless does not implement SearchScript for executable script cases this.leafLookup = leafContext == null ? null : lookup.getLeafSearchLookup(leafContext); } @@ -76,11 +68,6 @@ protected final LeafSearchLookup getLeafLookup() { return leafLookup; } - /** The leaf context for the Lucene segment this script was created for. */ - protected final LeafReaderContext getLeafContext() { - return leafContext; - } - /** The doc lookup for the Lucene segment this script was created for. */ public final LeafDocLookup getDoc() { // TODO: remove leniency when painless does not implement SearchScript for executable script cases @@ -96,7 +83,7 @@ public void setDocument(int docid) { } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { this.scorer = scorer; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java index 59b63520a1bd3..d6e7aca46a63b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java @@ -20,8 +20,8 @@ package org.elasticsearch.search.aggregations; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.Scorer; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ObjectArray; @@ -110,10 +110,10 @@ public LeafBucketCollector getLeafCollector(final LeafReaderContext ctx) { collectors.set(i, null); } return new LeafBucketCollector() { - Scorer scorer; + Scorable scorer; @Override - public void setScorer(Scorer scorer) throws IOException { + public void setScorer(Scorable scorer) throws IOException { this.scorer = scorer; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/LeafBucketCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/LeafBucketCollector.java index f5b7f15bb9403..367e1cce0608d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/LeafBucketCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/LeafBucketCollector.java @@ -20,7 +20,7 @@ package org.elasticsearch.search.aggregations; import org.apache.lucene.search.LeafCollector; -import org.apache.lucene.search.Scorer; 
+import org.apache.lucene.search.Scorable; import java.io.IOException; import java.util.stream.Stream; @@ -33,7 +33,7 @@ public abstract class LeafBucketCollector implements LeafCollector { public static final LeafBucketCollector NO_OP_COLLECTOR = new LeafBucketCollector() { @Override - public void setScorer(Scorer arg0) throws IOException { + public void setScorer(Scorable arg0) throws IOException { // no-op } @Override @@ -55,7 +55,7 @@ public static LeafBucketCollector wrap(Iterable collectors) return new LeafBucketCollector() { @Override - public void setScorer(Scorer s) throws IOException { + public void setScorer(Scorable s) throws IOException { for (LeafBucketCollector c : colls) { c.setScorer(s); } @@ -83,7 +83,7 @@ public final void collect(int doc) throws IOException { } @Override - public void setScorer(Scorer scorer) throws IOException { + public void setScorer(Scorable scorer) throws IOException { // no-op by default } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/LeafBucketCollectorBase.java b/server/src/main/java/org/elasticsearch/search/aggregations/LeafBucketCollectorBase.java index 45e7db08e2d27..529483107b192 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/LeafBucketCollectorBase.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/LeafBucketCollectorBase.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.aggregations; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.elasticsearch.common.lucene.ScorerAware; import java.io.IOException; @@ -48,7 +48,7 @@ public LeafBucketCollectorBase(LeafBucketCollector sub, Object values) { } @Override - public void setScorer(Scorer s) throws IOException { + public void setScorer(Scorable s) throws IOException { sub.setScorer(s); if (values != null) { values.setScorer(s); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java index 624c8d5409a56..552ad8c024ddc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java @@ -24,9 +24,9 @@ import org.apache.lucene.search.Collector; import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.MultiCollector; +import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreCachingWrappingScorer; import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.Scorer; import java.io.IOException; import java.util.ArrayList; @@ -174,7 +174,7 @@ private MultiLeafBucketCollector(List collectors, boolean c } @Override - public void setScorer(Scorer scorer) throws IOException { + public void setScorer(Scorable scorer) throws IOException { if (cacheScores) { scorer = new ScoreCachingWrappingScorer(scorer); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java index 4e63d693d1875..9ef72ad17d8fb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java @@ -19,11 +19,10 @@ package org.elasticsearch.search.aggregations.bucket.sampler; import 
org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopDocsCollector; import org.apache.lucene.search.TopScoreDocCollector; @@ -89,7 +88,7 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws IOExce // Deferring collector return new LeafBucketCollector() { @Override - public void setScorer(Scorer scorer) throws IOException { + public void setScorer(Scorable scorer) throws IOException { perSegCollector.setScorer(scorer); } @@ -156,7 +155,7 @@ class PerParentBucketSamples { private long parentBucket; private int matchedDocs; - PerParentBucketSamples(long parentBucket, Scorer scorer, LeafReaderContext readerContext) { + PerParentBucketSamples(long parentBucket, Scorable scorer, LeafReaderContext readerContext) { try { this.parentBucket = parentBucket; tdc = createTopDocsCollector(shardSize); @@ -185,7 +184,7 @@ public void collect(int doc) throws IOException { currentLeafCollector.collect(doc); } - public void setScorer(Scorer scorer) throws IOException { + public void setScorer(Scorable scorer) throws IOException { currentLeafCollector.setScorer(scorer); } @@ -198,19 +197,18 @@ public int getDocCount() { } } - class PerSegmentCollects extends Scorer { + class PerSegmentCollects extends Scorable { private LeafReaderContext readerContext; int maxDocId = Integer.MIN_VALUE; private float currentScore; private int currentDocId = -1; - private Scorer currentScorer; + private Scorable currentScorer; PerSegmentCollects(LeafReaderContext readerContext) throws IOException { // The publisher behaviour for Reader/Scorer listeners triggers a // call to this constructor with a null scorer so we can't call // scorer.getWeight() and pass the Weight to our base class. // However, passing null seems to have no adverse effects here... 
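Reviewer note: the Scorer-to-Scorable migration running through these files is mechanical, but the payoff is visible right here: Lucene 8's Scorable exposes only the "current score" surface (score() and docID()), so a collector-side wrapper like PerSegmentCollects no longer needs a Weight, an iterator(), or getMaxScore(). A minimal sketch, assuming Lucene 8.x on the classpath:

    import java.io.IOException;
    import org.apache.lucene.search.Scorable;

    // The smallest possible Scorable: a fixed doc and score. No Weight, no
    // DocIdSetIterator, no getMaxScore() -- exactly what a LeafCollector needs.
    final class FixedScorable extends Scorable {
        private final int docID;
        private final float score;

        FixedScorable(int docID, float score) {
            this.docID = docID;
            this.score = score;
        }

        @Override
        public float score() throws IOException {
            return score;
        }

        @Override
        public int docID() {
            return docID;
        }
    }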
- super(null); this.readerContext = readerContext; for (int i = 0; i < perBucketSamples.size(); i++) { PerParentBucketSamples perBucketSample = perBucketSamples.get(i); @@ -221,7 +219,7 @@ class PerSegmentCollects extends Scorer { } } - public void setScorer(Scorer scorer) throws IOException { + public void setScorer(Scorable scorer) throws IOException { this.currentScorer = scorer; for (int i = 0; i < perBucketSamples.size(); i++) { PerParentBucketSamples perBucketSample = perBucketSamples.get(i); @@ -266,11 +264,6 @@ public int docID() { return currentDocId; } - @Override - public DocIdSetIterator iterator() { - throw new ElasticsearchException("This caching scorer implementation only implements score() and docID()"); - } - public void collect(int docId, long parentBucket) throws IOException { perBucketSamples = bigArrays.grow(perBucketSamples, parentBucket + 1); PerParentBucketSamples sampler = perBucketSamples.get((int) parentBucket); @@ -282,10 +275,6 @@ public void collect(int docId, long parentBucket) throws IOException { maxDocId = Math.max(maxDocId, docId); } - @Override - public float getMaxScore(int upTo) throws IOException { - return Float.MAX_VALUE; - } } public int getDocCount(long parentBucket) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregator.java index a0c287f6eac51..345b21d03887e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregator.java @@ -20,11 +20,11 @@ package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.Scorer; import org.elasticsearch.common.util.CollectionUtils; -import org.elasticsearch.script.ScriptedMetricAggContexts; import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptedMetricAggContexts; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; @@ -70,7 +70,7 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final ScriptedMetricAggContexts.MapScript leafMapScript = mapScript.newInstance(ctx); return new LeafBucketCollectorBase(sub, leafMapScript) { @Override - public void setScorer(Scorer scorer) throws IOException { + public void setScorer(Scorable scorer) throws IOException { leafMapScript.setScorer(scorer); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregator.java index ddd62b82500ac..c017eb4a5e3bc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregator.java @@ -21,15 +21,14 @@ import com.carrotsearch.hppc.LongObjectHashMap; import com.carrotsearch.hppc.cursors.ObjectCursor; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Collector; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.MultiCollector; +import 
org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopDocsCollector; import org.apache.lucene.search.TopFieldCollector; @@ -106,10 +105,10 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCol final LongObjectHashMap leafCollectors = new LongObjectHashMap<>(1); return new LeafBucketCollectorBase(sub, null) { - Scorer scorer; + Scorable scorer; @Override - public void setScorer(Scorer scorer) throws IOException { + public void setScorer(Scorable scorer) throws IOException { this.scorer = scorer; super.setScorer(scorer); for (ObjectCursor cursor : leafCollectors.values()) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java index 25e3d38af5baa..4e6760f44fe90 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSource.java @@ -26,7 +26,7 @@ import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.lucene.ScorerAware; import org.elasticsearch.common.util.CollectionUtils; @@ -295,7 +295,7 @@ static class LongValues extends AbstractSortingNumericDocValues implements Score } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { script.setScorer(scorer); } @@ -326,7 +326,7 @@ static class DoubleValues extends SortingNumericDoubleValues implements ScorerAw } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { script.setScorer(scorer); } @@ -445,7 +445,7 @@ static class BytesValues extends SortingBinaryDocValues implements ScorerAware { } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { script.setScorer(scorer); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptBytesValues.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptBytesValues.java index 5ec1858487e09..144e08ce6f275 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptBytesValues.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptBytesValues.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.search.aggregations.support.values; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.elasticsearch.common.lucene.ScorerAware; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; @@ -85,7 +85,7 @@ public boolean advanceExact(int doc) throws IOException { } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { script.setScorer(scorer); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptDoubleValues.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptDoubleValues.java index 1227efb5ea0af..4bb531c0d40d7 100644 --- 
a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptDoubleValues.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptDoubleValues.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.search.aggregations.support.values; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.elasticsearch.common.lucene.ScorerAware; import org.elasticsearch.index.fielddata.SortingNumericDoubleValues; import org.elasticsearch.script.SearchScript; @@ -107,7 +107,7 @@ private static double toDoubleValue(Object o) { } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { script.setScorer(scorer); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptLongValues.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptLongValues.java index cdc448bd04130..c57afa1960d97 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptLongValues.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptLongValues.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.search.aggregations.support.values; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.apache.lucene.util.LongValues; import org.elasticsearch.common.lucene.ScorerAware; import org.elasticsearch.index.fielddata.AbstractSortingNumericDocValues; @@ -106,7 +106,7 @@ private static long toLongValue(Object o) { } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { script.setScorer(scorer); } } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java index 3ef3064697a72..97e5b70f9da51 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java @@ -46,6 +46,7 @@ import java.util.HashMap; import java.util.List; import java.util.Objects; +import java.util.stream.Collectors; /** * Query sub phase which pulls data from doc values @@ -77,6 +78,15 @@ public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOExcept hits = hits.clone(); // don't modify the incoming hits Arrays.sort(hits, Comparator.comparingInt(SearchHit::docId)); + List noFormatFields = context.docValueFieldsContext().fields().stream().filter(f -> f.format == null).map(f -> f.field) + .collect(Collectors.toList()); + if (noFormatFields.isEmpty() == false) { + DEPRECATION_LOGGER.deprecated("There are doc-value fields which are not using a format. The output will " + + "change in 7.0 when doc value fields get formatted based on mappings by default. 
It is recommended to pass " + + "[format={}] with a doc value field in order to opt in for the future behaviour and ease the migration to " + + "7.0: {}", DocValueFieldsContext.USE_DEFAULT_FORMAT, noFormatFields); + } + for (FieldAndFormat fieldAndFormat : context.docValueFieldsContext().fields()) { String field = fieldAndFormat.field; MappedFieldType fieldType = context.mapperService().fullName(field); @@ -84,10 +94,6 @@ public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOExcept final IndexFieldData indexFieldData = context.getForField(fieldType); final DocValueFormat format; if (fieldAndFormat.format == null) { - DEPRECATION_LOGGER.deprecated("Doc-value field [" + fieldAndFormat.field + "] is not using a format. The output will " + - "change in 7.0 when doc value fields get formatted based on mappings by default. It is recommended to pass " + - "[format={}] with the doc value field in order to opt in for the future behaviour and ease the migration to " + - "7.0.", DocValueFieldsContext.USE_DEFAULT_FORMAT); format = null; } else { String formatDesc = fieldAndFormat.format; diff --git a/server/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingLeafBucketCollector.java b/server/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingLeafBucketCollector.java index 4db67967dcb2b..cc84b1cfb668f 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingLeafBucketCollector.java +++ b/server/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingLeafBucketCollector.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.profile.aggregation; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.profile.Timer; @@ -46,7 +46,7 @@ public void collect(int doc, long bucket) throws IOException { } @Override - public void setScorer(Scorer scorer) throws IOException { + public void setScorer(Scorable scorer) throws IOException { delegate.setScorer(scorer); } diff --git a/server/src/main/java/org/elasticsearch/search/profile/query/ProfileCollector.java b/server/src/main/java/org/elasticsearch/search/profile/query/ProfileCollector.java index 940e3902954b5..b900cb04f79dd 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/query/ProfileCollector.java +++ b/server/src/main/java/org/elasticsearch/search/profile/query/ProfileCollector.java @@ -24,8 +24,8 @@ import org.apache.lucene.search.FilterCollector; import org.apache.lucene.search.FilterLeafCollector; import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.Scorer; import java.io.IOException; @@ -76,7 +76,7 @@ public void collect(int doc) throws IOException { } @Override - public void setScorer(Scorer scorer) throws IOException { + public void setScorer(Scorable scorer) throws IOException { final long start = System.nanoTime(); try { super.setScorer(scorer); diff --git a/server/src/main/java/org/elasticsearch/search/profile/query/ProfileScorer.java b/server/src/main/java/org/elasticsearch/search/profile/query/ProfileScorer.java index 8913f484847e6..7899750461e52 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/query/ProfileScorer.java +++ b/server/src/main/java/org/elasticsearch/search/profile/query/ProfileScorer.java @@ -71,7 +71,7 @@ public Weight getWeight() { } @Override - public Collection 
getChildren() throws IOException { + public Collection<Scorable.ChildScorable> getChildren() throws IOException { return scorer.getChildren(); } diff --git a/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java index 4759027ee51b0..1b71c51d4162b 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java @@ -21,7 +21,7 @@ import org.apache.lucene.index.BinaryDocValues; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.apache.lucene.search.SortField; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; @@ -351,7 +351,7 @@ public BytesRef binaryValue() { return FieldData.singleton(values); } @Override - protected void setScorer(Scorer scorer) { + protected void setScorer(Scorable scorer) { leafScript.setScorer(scorer); } }; @@ -376,7 +376,7 @@ public double doubleValue() { return FieldData.singleton(values); } @Override - protected void setScorer(Scorer scorer) { + protected void setScorer(Scorable scorer) { leafScript.setScorer(scorer); } }; diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java index f08ef75612f7f..1c87af4a442d8 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.SettingUpgrader; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; @@ -66,6 +67,20 @@ public abstract class RemoteClusterAware extends AbstractComponent { Setting.Property.Dynamic, Setting.Property.NodeScope)); + public static final SettingUpgrader<List<String>> SEARCH_REMOTE_CLUSTER_SEEDS_UPGRADER = new SettingUpgrader<List<String>>() { + + @Override + public Setting<List<String>> getSetting() { + return SEARCH_REMOTE_CLUSTERS_SEEDS; + } + + @Override + public String getKey(final String key) { + return key.replaceFirst("^search", "cluster"); + } + + }; + /** * A list of initial seed nodes to discover eligible nodes from the remote cluster */ @@ -105,6 +120,20 @@ public abstract class RemoteClusterAware extends AbstractComponent { Setting.Property.NodeScope), REMOTE_CLUSTERS_SEEDS); + public static final SettingUpgrader<String> SEARCH_REMOTE_CLUSTERS_PROXY_UPGRADER = new SettingUpgrader<String>() { + + @Override + public Setting<String> getSetting() { + return SEARCH_REMOTE_CLUSTERS_PROXY; + } + + @Override + public String getKey(final String key) { + return key.replaceFirst("^search", "cluster"); + } + + }; + /** * A proxy address for the remote cluster.
* NOTE: this setting is undocumented until we have at least one transport that supports passing diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index 0e8bd5cb28db5..04cb1ab3e5689 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -19,8 +19,6 @@ package org.elasticsearch.transport; -import java.util.Collection; -import java.util.function.Supplier; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; @@ -35,6 +33,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.SettingUpgrader; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.CountDown; @@ -43,6 +42,7 @@ import java.io.Closeable; import java.io.IOException; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -55,6 +55,7 @@ import java.util.function.BiFunction; import java.util.function.Function; import java.util.function.Predicate; +import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -132,6 +133,20 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl key -> boolSetting(key, false, Setting.Property.Deprecated, Setting.Property.Dynamic, Setting.Property.NodeScope), REMOTE_CLUSTERS_SEEDS); + public static final SettingUpgrader<Boolean> SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE_UPGRADER = new SettingUpgrader<Boolean>() { + + @Override + public Setting<Boolean> getSetting() { + return SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE; + } + + @Override + public String getKey(final String key) { + return key.replaceFirst("^search", "cluster"); + } + + }; + public static final Setting.AffixSetting<Boolean> REMOTE_CLUSTER_SKIP_UNAVAILABLE = Setting.affixKeySetting( "cluster.remote.", diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeActionTests.java index bd43182f00756..ce60b14b3efc7 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeActionTests.java @@ -47,6 +47,7 @@ import java.util.HashSet; import static java.util.Collections.emptyMap; +import static org.hamcrest.Matchers.equalTo; public class TransportResizeActionTests extends ESTestCase { @@ -92,6 +93,16 @@ public void testErrorCondition() { ).getMessage().startsWith("Can't merge index with more than [2147483519] docs - too many documents in shards ")); + IllegalArgumentException softDeletesError = expectThrows(IllegalArgumentException.class, () -> { + ResizeRequest req = new ResizeRequest("target", "source"); + req.getTargetIndexRequest().settings(Settings.builder().put("index.soft_deletes.enabled", false)); + ClusterState clusterState = createClusterState("source", 8, 1, + Settings.builder().put("index.blocks.write", true).put("index.soft_deletes.enabled", true).build()); + TransportResizeAction.prepareCreateIndexRequest(req, clusterState, + (i) -> new
DocsStats(between(10, 1000), between(1, 10), between(1, 10000)), "source", "target"); + }); + assertThat(softDeletesError.getMessage(), equalTo("Can't disable [index.soft_deletes.enabled] setting on resize")); + // create one that won't fail ClusterState clusterState = ClusterState.builder(createClusterState("source", randomIntBetween(2, 10), 0, Settings.builder().put("index.blocks.write", true).build())).nodes(DiscoveryNodes.builder().add(newNode("node1"))) diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java index 24f5a69656114..abb34f80eac0e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java @@ -261,6 +261,7 @@ public void testPrepareResizeIndexSettings() { .put("index.version.upgraded", upgraded) .put("index.similarity.default.type", "BM25") .put("index.analysis.analyzer.default.tokenizer", "keyword") + .put("index.soft_deletes.enabled", "true") .build(); runPrepareResizeIndexSettingsTest( indexSettings, @@ -277,6 +278,7 @@ public void testPrepareResizeIndexSettings() { assertThat(settings.get("index.allocation.max_retries"), equalTo("1")); assertThat(settings.getAsVersion("index.version.created", null), equalTo(version)); assertThat(settings.getAsVersion("index.version.upgraded", null), equalTo(upgraded)); + assertThat(settings.get("index.soft_deletes.enabled"), equalTo("true")); }); } @@ -337,6 +339,15 @@ public void testPrepareResizeIndexSettingsSimilaritySettings() { } + public void testDoNotOverrideSoftDeletesSettingOnResize() { + runPrepareResizeIndexSettingsTest( + Settings.builder().put("index.soft_deletes.enabled", "false").build(), + Settings.builder().put("index.soft_deletes.enabled", "true").build(), + Collections.emptyList(), + randomBoolean(), + settings -> assertThat(settings.get("index.soft_deletes.enabled"), equalTo("true"))); + } + private void runPrepareResizeIndexSettingsTest( final Settings sourceSettings, final Settings requestSettings, diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java index 536e3cbb7e08d..eeec65f0e2e29 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java @@ -134,8 +134,8 @@ public void testShrink() { // we don't handle shrink yet ResizeAllocationDecider resizeAllocationDecider = new ResizeAllocationDecider(Settings.EMPTY); RoutingAllocation routingAllocation = new RoutingAllocation(null, null, clusterState, null, 0); - ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(idx, 0), null, true, RecoverySource - .LocalShardsRecoverySource.INSTANCE, ShardRoutingState.UNASSIGNED); + ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(idx, 0), null, true, ShardRoutingState.UNASSIGNED, + RecoverySource.LocalShardsRecoverySource.INSTANCE); assertEquals(Decision.ALWAYS, resizeAllocationDecider.canAllocate(shardRouting, routingAllocation)); assertEquals(Decision.ALWAYS, resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node1"), routingAllocation)); @@ -164,8 
+164,8 @@ public void testSourceNotActive() { RoutingAllocation routingAllocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, 0); int shardId = randomIntBetween(0, 3); int sourceShardId = IndexMetaData.selectSplitShard(shardId, clusterState.metaData().index("source"), 4).id(); - ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(idx, shardId), null, true, RecoverySource - .LocalShardsRecoverySource.INSTANCE, ShardRoutingState.UNASSIGNED); + ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(idx, shardId), null, true, ShardRoutingState.UNASSIGNED, + RecoverySource.LocalShardsRecoverySource.INSTANCE); assertEquals(Decision.NO, resizeAllocationDecider.canAllocate(shardRouting, routingAllocation)); assertEquals(Decision.NO, resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node1"), routingAllocation)); @@ -204,8 +204,8 @@ public void testSourcePrimaryActive() { RoutingAllocation routingAllocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, 0); int shardId = randomIntBetween(0, 3); int sourceShardId = IndexMetaData.selectSplitShard(shardId, clusterState.metaData().index("source"), 4).id(); - ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(idx, shardId), null, true, RecoverySource - .LocalShardsRecoverySource.INSTANCE, ShardRoutingState.UNASSIGNED); + ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(idx, shardId), null, true, ShardRoutingState.UNASSIGNED, + RecoverySource.LocalShardsRecoverySource.INSTANCE); assertEquals(Decision.YES, resizeAllocationDecider.canAllocate(shardRouting, routingAllocation)); String allowedNode = clusterState.getRoutingTable().index("source").shard(sourceShardId).primaryShard().currentNodeId(); diff --git a/server/src/test/java/org/elasticsearch/common/lucene/search/function/MinScoreScorerTests.java b/server/src/test/java/org/elasticsearch/common/lucene/search/function/MinScoreScorerTests.java index d60458cf82642..e9685c03bc4b8 100644 --- a/server/src/test/java/org/elasticsearch/common/lucene/search/function/MinScoreScorerTests.java +++ b/server/src/test/java/org/elasticsearch/common/lucene/search/function/MinScoreScorerTests.java @@ -19,9 +19,14 @@ package org.elasticsearch.common.lucene.search.function; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.Term; import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TwoPhaseIterator; +import org.apache.lucene.search.Weight; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; @@ -36,7 +41,7 @@ private static DocIdSetIterator iterator(final int... docs) { return new DocIdSetIterator() { int i = -1; - + @Override public int nextDoc() throws IOException { if (i + 1 == docs.length) { @@ -45,17 +50,17 @@ public int nextDoc() throws IOException { return docs[++i]; } } - + @Override public int docID() { return i < 0 ? -1 : i == docs.length ? 
NO_MORE_DOCS : docs[i]; } - + @Override public long cost() { return docs.length; } - + @Override public int advance(int target) throws IOException { return slowAdvance(target); @@ -63,9 +68,36 @@ public int advance(int target) throws IOException { }; } + private static Weight fakeWeight() { + return new Weight(new MatchAllDocsQuery()) { + @Override + public void extractTerms(Set terms) { + + } + + @Override + public Explanation explain(LeafReaderContext context, int doc) throws IOException { + return null; + } + + @Override + public Scorer scorer(LeafReaderContext context) throws IOException { + return null; + } + + @Override + public boolean isCacheable(LeafReaderContext ctx) { + return false; + } + }; + } + private static Scorer scorer(int maxDoc, final int[] docs, final float[] scores, final boolean twoPhase) { final DocIdSetIterator iterator = twoPhase ? DocIdSetIterator.all(maxDoc) : iterator(docs); - return new Scorer(null) { + return new Scorer(fakeWeight()) { + + int lastScoredDoc = -1; + public DocIdSetIterator iterator() { if (twoPhase) { return TwoPhaseIterator.asDocIdSetIterator(twoPhaseIterator()); @@ -77,12 +109,12 @@ public DocIdSetIterator iterator() { public TwoPhaseIterator twoPhaseIterator() { if (twoPhase) { return new TwoPhaseIterator(iterator) { - + @Override public boolean matches() throws IOException { return Arrays.binarySearch(docs, iterator.docID()) >= 0; } - + @Override public float matchCost() { return 10; @@ -100,6 +132,8 @@ public int docID() { @Override public float score() throws IOException { + assertNotEquals("score() called twice on doc " + docID(), lastScoredDoc, docID()); + lastScoredDoc = docID(); final int idx = Arrays.binarySearch(docs, docID()); return scores[idx]; } @@ -130,7 +164,7 @@ public void doTestRandom(boolean twoPhase) throws IOException { } Scorer scorer = scorer(maxDoc, docs, scores, twoPhase); final float minScore = random().nextFloat(); - Scorer minScoreScorer = new MinScoreScorer(null, scorer, minScore); + Scorer minScoreScorer = new MinScoreScorer(fakeWeight(), scorer, minScore); int doc = -1; while (doc != DocIdSetIterator.NO_MORE_DOCS) { final int target; @@ -152,7 +186,7 @@ public void doTestRandom(boolean twoPhase) throws IOException { assertEquals(DocIdSetIterator.NO_MORE_DOCS, doc); } else { assertEquals(docs[idx], doc); - assertEquals(scores[idx], scorer.score(), 0f); + assertEquals(scores[idx], minScoreScorer.score(), 0f); } } } diff --git a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index 0ee1d2e9c4a80..6766316fafd46 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -47,6 +47,7 @@ import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Function; +import java.util.stream.Collectors; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; @@ -1171,4 +1172,47 @@ public String getValue(final String value) { } } + public void testUpgradeListSetting() { + final Setting> oldSetting = + Setting.listSetting("foo.old", Collections.emptyList(), Function.identity(), Property.NodeScope); + final Setting> newSetting = + Setting.listSetting("foo.new", Collections.emptyList(), Function.identity(), Property.NodeScope); + + final AbstractScopedSettings service = + new ClusterSettings( + Settings.EMPTY, + 
new HashSet<>(Arrays.asList(oldSetting, newSetting)), + Collections.singleton(new SettingUpgrader>() { + + @Override + public Setting> getSetting() { + return oldSetting; + } + + @Override + public String getKey(final String key) { + return "foo.new"; + } + + @Override + public List getListValue(final List value) { + return value.stream().map(s -> "new." + s).collect(Collectors.toList()); + } + })); + + final int length = randomIntBetween(0, 16); + final List values = length == 0 ? Collections.emptyList() : new ArrayList<>(length); + for (int i = 0; i < length; i++) { + values.add(randomAlphaOfLength(8)); + } + + final Settings settings = Settings.builder().putList("foo.old", values).build(); + final Settings upgradedSettings = service.upgradeSettings(settings); + assertFalse(oldSetting.exists(upgradedSettings)); + assertTrue(newSetting.exists(upgradedSettings)); + assertThat( + newSetting.get(upgradedSettings), + equalTo(oldSetting.get(settings).stream().map(s -> "new." + s).collect(Collectors.toList()))); + } + } diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java index b13988b705059..30cfee81ddd40 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -180,6 +180,13 @@ public void testSimpleUpdate() { } } + public void testValidateStringSetting() { + Settings settings = Settings.builder().putList("foo.bar", Arrays.asList("bla-a", "bla-b")).build(); + Setting stringSetting = Setting.simpleString("foo.bar", Property.NodeScope); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> stringSetting.get(settings)); + assertEquals("Found list type value for setting [foo.bar] but but did not expect a list for it.", e.getMessage()); + } + private static final Setting FOO_BAR_SETTING = new Setting<>( "foo.bar", "foobar", diff --git a/server/src/test/java/org/elasticsearch/common/settings/UpgradeSettingsIT.java b/server/src/test/java/org/elasticsearch/common/settings/UpgradeSettingsIT.java index 839b96e641870..99161f842b7c2 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/UpgradeSettingsIT.java +++ b/server/src/test/java/org/elasticsearch/common/settings/UpgradeSettingsIT.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.transport.RemoteClusterService; import org.junit.After; import java.util.Arrays; @@ -122,4 +123,37 @@ private void runUpgradeSettingsOnUpdateTest( assertThat(UpgradeSettingsPlugin.newSetting.get(settingsFunction.apply(response.getState().metaData())), equalTo("new." 
+ value)); } + public void testUpgradeRemoteClusterSettings() { + final boolean skipUnavailable = randomBoolean(); + client() + .admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings( + Settings.builder() + .put("search.remote.foo.skip_unavailable", skipUnavailable) + .putList("search.remote.foo.seeds", Collections.singletonList("localhost:9200")) + .put("search.remote.foo.proxy", "localhost:9200") + .build()) + .get(); + + final ClusterStateResponse response = client().admin().cluster().prepareState().clear().setMetaData(true).get(); + + final Settings settings = response.getState().metaData().persistentSettings(); + assertFalse(RemoteClusterService.SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").exists(settings)); + assertTrue(RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").exists(settings)); + assertThat( + RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").get(settings), + equalTo(skipUnavailable)); + assertFalse(RemoteClusterService.SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").exists(settings)); + assertTrue(RemoteClusterService.REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").exists(settings)); + assertThat( + RemoteClusterService.REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").get(settings), + equalTo(Collections.singletonList("localhost:9200"))); + assertFalse(RemoteClusterService.SEARCH_REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace("foo").exists(settings)); + assertTrue(RemoteClusterService.REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace("foo").exists(settings)); + assertThat( + RemoteClusterService.REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace("foo").get(settings), equalTo("localhost:9200")); + } + } diff --git a/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java b/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java index ac2f2b0d4f32e..c0b01eb5ec518 100644 --- a/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java +++ b/server/src/test/java/org/elasticsearch/discovery/AbstractDisruptionTestCase.java @@ -111,6 +111,7 @@ protected void beforeIndexDeletion() throws Exception { super.beforeIndexDeletion(); internalCluster().assertConsistentHistoryBetweenTranslogAndLuceneIndex(); assertSeqNos(); + assertSameDocIdsOnShards(); } } diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index 4a0d6a8e8884b..3ac41ad04cf2e 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -322,6 +322,7 @@ public boolean clearData(String nodeName) { * This test ensures that when an index deletion takes place while a node is offline, when that * node rejoins the cluster, it deletes the index locally instead of importing it as a dangling index. 
*/ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33613") public void testIndexDeletionWhenNodeRejoins() throws Exception { final String indexName = "test-index-del-on-node-rejoin-idx"; final int numNodes = 2; diff --git a/server/src/test/java/org/elasticsearch/http/HttpInfoTests.java b/server/src/test/java/org/elasticsearch/http/HttpInfoTests.java new file mode 100644 index 0000000000000..db149bd6d0db0 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/http/HttpInfoTests.java @@ -0,0 +1,97 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http; + +import java.io.IOException; +import java.net.InetAddress; +import java.util.Map; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.test.ESTestCase; + +public class HttpInfoTests extends ESTestCase { + + public void testCorrectlyDisplayPublishedCname() throws Exception { + InetAddress localhost = InetAddress.getByName("localhost"); + int port = 9200; + assertPublishAddress( + new HttpInfo( + new BoundTransportAddress( + new TransportAddress[]{new TransportAddress(localhost, port)}, + new TransportAddress(localhost, port) + ), 0L, true + ), "localhost/" + NetworkAddress.format(localhost) + ':' + port + ); + } + + public void testHideCnameIfDeprecatedFormat() throws Exception { + InetAddress localhost = InetAddress.getByName("localhost"); + int port = 9200; + assertPublishAddress( + new HttpInfo( + new BoundTransportAddress( + new TransportAddress[]{new TransportAddress(localhost, port)}, + new TransportAddress(localhost, port) + ), 0L, false + ), NetworkAddress.format(localhost) + ':' + port + ); + } + + public void testCorrectDisplayPublishedIp() throws Exception { + InetAddress localhost = InetAddress.getByName(NetworkAddress.format(InetAddress.getByName("localhost"))); + int port = 9200; + assertPublishAddress( + new HttpInfo( + new BoundTransportAddress( + new TransportAddress[]{new TransportAddress(localhost, port)}, + new TransportAddress(localhost, port) + ), 0L, true + ), NetworkAddress.format(localhost) + ':' + port + ); + } + + public void testCorrectDisplayPublishedIpv6() throws Exception { + int port = 9200; + TransportAddress localhost = + new TransportAddress(InetAddress.getByName(NetworkAddress.format(InetAddress.getByName("0:0:0:0:0:0:0:1"))), port); + assertPublishAddress( + new HttpInfo( + new BoundTransportAddress(new TransportAddress[]{localhost}, localhost), 0L, true + ), localhost.toString() +
); + } + + @SuppressWarnings("unchecked") + private void assertPublishAddress(HttpInfo httpInfo, String expected) throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + httpInfo.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + assertEquals( + expected, + ((Map) createParser(builder).map().get(HttpInfo.Fields.HTTP)) + .get(HttpInfo.Fields.PUBLISH_ADDRESS) + ); + } +} diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index a26fd72468b48..8f9d90154f8f4 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -4087,7 +4087,7 @@ public void markSeqNoAsCompleted(long seqNo) { final long currentLocalCheckpoint = actualEngine.getLocalCheckpoint(); final long resetLocalCheckpoint = randomIntBetween(Math.toIntExact(SequenceNumbers.NO_OPS_PERFORMED), Math.toIntExact(currentLocalCheckpoint)); - actualEngine.resetLocalCheckpoint(resetLocalCheckpoint); + actualEngine.getLocalCheckpointTracker().resetCheckpoint(resetLocalCheckpoint); completedSeqNos.clear(); actualEngine.restoreLocalCheckpointFromTranslog(); final Set intersection = new HashSet<>(expectedCompletedSeqNos); @@ -5033,7 +5033,7 @@ public void testAcquireSearcherOnClosingEngine() throws Exception { expectThrows(AlreadyClosedException.class, () -> engine.acquireSearcher("test")); } - private static void trimUnsafeCommits(EngineConfig config) throws IOException { + static void trimUnsafeCommits(EngineConfig config) throws IOException { final Store store = config.getStore(); final TranslogConfig translogConfig = config.getTranslogConfig(); final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY); diff --git a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java index 286e85cef3fc6..115785b2e7b96 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java @@ -41,6 +41,7 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.nullValue; public class LiveVersionMapTests extends ESTestCase { @@ -91,6 +92,19 @@ public void testRamBytesUsed() throws Exception { assertEquals(actualRamBytesUsed, estimatedRamBytesUsed, tolerance); } + public void testRefreshingBytes() throws IOException { + LiveVersionMap map = new LiveVersionMap(); + BytesRefBuilder uid = new BytesRefBuilder(); + uid.copyChars(TestUtil.randomSimpleString(random(), 10, 20)); + try (Releasable r = map.acquireLock(uid.toBytesRef())) { + map.putIndexUnderLock(uid.toBytesRef(), randomIndexVersionValue()); + } + map.beforeRefresh(); + assertThat(map.getRefreshingBytes(), greaterThan(0L)); + map.afterRefresh(true); + assertThat(map.getRefreshingBytes(), equalTo(0L)); + } + private BytesRef uid(String string) { BytesRefBuilder builder = new BytesRefBuilder(); builder.copyChars(string); diff --git a/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java new file mode 100644 index 
0000000000000..4080dd33d5341 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java @@ -0,0 +1,156 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.index.engine; + +import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.seqno.SeqNoStats; +import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.index.store.Store; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Function; + +import static org.hamcrest.Matchers.equalTo; + +public class ReadOnlyEngineTests extends EngineTestCase { + + public void testReadOnlyEngine() throws Exception { + IOUtils.close(engine, store); + Engine readOnlyEngine = null; + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + try (Store store = createStore()) { + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); + int numDocs = scaledRandomIntBetween(10, 1000); + final SeqNoStats lastSeqNoStats; + final List lastDocIds; + try (InternalEngine engine = createEngine(config)) { + Engine.Get get = null; + for (int i = 0; i < numDocs; i++) { + if (rarely()) { + continue; // gap in sequence number + } + ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); + engine.index(new Engine.Index(newUid(doc), doc, i, primaryTerm.get(), 1, null, Engine.Operation.Origin.REPLICA, + System.nanoTime(), -1, false)); + if (get == null || rarely()) { + get = newGet(randomBoolean(), doc); + } + if (rarely()) { + engine.flush(); + } + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint())); + } + engine.syncTranslog(); + engine.flush(); + readOnlyEngine = new ReadOnlyEngine(engine.engineConfig, engine.getSeqNoStats(globalCheckpoint.get()), + engine.getTranslogStats(), false, Function.identity()); + lastSeqNoStats = engine.getSeqNoStats(globalCheckpoint.get()); + lastDocIds = getDocIds(engine, true); + assertThat(readOnlyEngine.getLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint())); + assertThat(readOnlyEngine.getSeqNoStats(globalCheckpoint.get()).getMaxSeqNo(), equalTo(lastSeqNoStats.getMaxSeqNo())); + assertThat(getDocIds(readOnlyEngine, false), equalTo(lastDocIds)); + for (int i = 0; i < numDocs; i++) { + if (randomBoolean()) { + String delId = Integer.toString(i); + engine.delete(new Engine.Delete("test", delId, newUid(delId), primaryTerm.get())); + } + if (rarely()) { + engine.flush(); 
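+ // this flush writes a commit newer than the one the ReadOnlyEngine above was opened with; the assertions below verify that the read-only view stays pinned to the commit it captured at construction time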
+ } + } + Engine.Searcher external = readOnlyEngine.acquireSearcher("test", Engine.SearcherScope.EXTERNAL); + Engine.Searcher internal = readOnlyEngine.acquireSearcher("test", Engine.SearcherScope.INTERNAL); + assertSame(external.reader(), internal.reader()); + IOUtils.close(external, internal); + // the locked down engine should still point to the previous commit + assertThat(readOnlyEngine.getLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint())); + assertThat(readOnlyEngine.getSeqNoStats(globalCheckpoint.get()).getMaxSeqNo(), equalTo(lastSeqNoStats.getMaxSeqNo())); + assertThat(getDocIds(readOnlyEngine, false), equalTo(lastDocIds)); + try (Engine.GetResult getResult = readOnlyEngine.get(get, readOnlyEngine::acquireSearcher)) { + assertTrue(getResult.exists()); + } + + } + // Close and reopen the main engine + InternalEngineTests.trimUnsafeCommits(config); + try (InternalEngine recoveringEngine = new InternalEngine(config)) { + recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); + // the locked down engine should still point to the previous commit + assertThat(readOnlyEngine.getLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint())); + assertThat(readOnlyEngine.getSeqNoStats(globalCheckpoint.get()).getMaxSeqNo(), equalTo(lastSeqNoStats.getMaxSeqNo())); + assertThat(getDocIds(readOnlyEngine, false), equalTo(lastDocIds)); + } + } finally { + IOUtils.close(readOnlyEngine); + } + } + + public void testFlushes() throws IOException { + IOUtils.close(engine, store); + Engine readOnlyEngine = null; + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + try (Store store = createStore()) { + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); + int numDocs = scaledRandomIntBetween(10, 1000); + try (InternalEngine engine = createEngine(config)) { + for (int i = 0; i < numDocs; i++) { + if (rarely()) { + continue; // gap in sequence number + } + ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); + engine.index(new Engine.Index(newUid(doc), doc, i, primaryTerm.get(), 1, null, Engine.Operation.Origin.REPLICA, + System.nanoTime(), -1, false)); + if (rarely()) { + engine.flush(); + } + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint())); + } + engine.syncTranslog(); + engine.flushAndClose(); + readOnlyEngine = new ReadOnlyEngine(engine.engineConfig, null , null, true, Function.identity()); + Engine.CommitId flush = readOnlyEngine.flush(randomBoolean(), randomBoolean()); + assertEquals(flush, readOnlyEngine.flush(randomBoolean(), randomBoolean())); + } finally { + IOUtils.close(readOnlyEngine); + } + } + } + + public void testReadOnly() throws IOException { + IOUtils.close(engine, store); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + try (Store store = createStore()) { + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); + store.createEmpty(); + try (ReadOnlyEngine readOnlyEngine = new ReadOnlyEngine(config, null , null, true, Function.identity())) { + Class expectedException = LuceneTestCase.TEST_ASSERTS_ENABLED ? 
AssertionError.class : + UnsupportedOperationException.class; + expectThrows(expectedException, () -> readOnlyEngine.index(null)); + expectThrows(expectedException, () -> readOnlyEngine.delete(null)); + expectThrows(expectedException, () -> readOnlyEngine.noOp(null)); + expectThrows(UnsupportedOperationException.class, () -> readOnlyEngine.syncFlush(null, null)); + } + } + } +} diff --git a/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java index cc224019100b5..624205a1a3cbd 100644 --- a/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.query.functionscore; import com.fasterxml.jackson.core.JsonParseException; + import org.apache.lucene.index.Term; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; @@ -272,6 +273,8 @@ public void testIllegalArguments() { FunctionScoreQueryBuilder builder = new FunctionScoreQueryBuilder(matchAllQuery()); expectThrows(IllegalArgumentException.class, () -> builder.scoreMode(null)); expectThrows(IllegalArgumentException.class, () -> builder.boostMode(null)); + expectThrows(IllegalArgumentException.class, + () -> new FunctionScoreQueryBuilder.FilterFunctionBuilder(new WeightBuilder().setWeight(-1 * randomFloat()))); } public void testParseFunctionsArray() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java index e471874f6d664..f2cdfbf8fc566 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java @@ -519,18 +519,14 @@ public void testSeqNoCollision() throws Exception { shards.promoteReplicaToPrimary(replica2).get(); logger.info("--> Recover replica3 from replica2"); recoverReplica(replica3, replica2, true); - try (Translog.Snapshot snapshot = getTranslog(replica3).newSnapshot()) { + try (Translog.Snapshot snapshot = replica3.getHistoryOperations("test", 0)) { assertThat(snapshot.totalOperations(), equalTo(initDocs + 1)); final List expectedOps = new ArrayList<>(initOperations); expectedOps.add(op2); assertThat(snapshot, containsOperationsInAnyOrder(expectedOps)); assertThat("Peer-recovery should not send overridden operations", snapshot.skippedOperations(), equalTo(0)); } - // TODO: We should assert the content of shards in the ReplicationGroup. 
- // Without rollback replicas(current implementation), we don't have the same content across shards: - // - replica1 has {doc1} - // - replica2 has {doc1, doc2} - // - replica3 can have either {doc2} only if operation-based recovery or {doc1, doc2} if file-based recovery + shards.assertAllEqual(initDocs + 1); } } diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index 28122665e9bb6..a73d7385d9d4d 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -55,10 +55,8 @@ import java.util.ArrayList; import java.util.Collections; import java.util.EnumSet; -import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicBoolean; @@ -306,14 +304,6 @@ public void testRecoveryAfterPrimaryPromotion() throws Exception { assertThat(newReplica.recoveryState().getIndex().fileDetails(), not(empty())); assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(), equalTo(uncommittedOpsOnPrimary)); } - - // roll back the extra ops in the replica - shards.removeReplica(replica); - replica.close("resync", false); - replica.store().close(); - newReplica = shards.addReplicaWithExistingPath(replica.shardPath(), replica.routingEntry().currentNodeId()); - shards.recoverReplica(newReplica); - shards.assertAllEqual(totalDocs); // Make sure that flushing on a recovering shard is ok. shards.flush(); shards.assertAllEqual(totalDocs); @@ -406,31 +396,14 @@ public void testResyncAfterPrimaryPromotion() throws Exception { indexOnReplica(bulkShardRequest, shards, justReplica); } - logger.info("--> seqNo primary {} replica {}", oldPrimary.seqNoStats(), newPrimary.seqNoStats()); - - logger.info("--> resyncing replicas"); + logger.info("--> resyncing replicas seqno_stats primary {} replica {}", oldPrimary.seqNoStats(), newPrimary.seqNoStats()); PrimaryReplicaSyncer.ResyncTask task = shards.promoteReplicaToPrimary(newPrimary).get(); if (syncedGlobalCheckPoint) { assertEquals(extraDocs, task.getResyncedOperations()); } else { assertThat(task.getResyncedOperations(), greaterThanOrEqualTo(extraDocs)); } - List replicas = shards.getReplicas(); - - // check all docs on primary are available on replica - Set primaryIds = getShardDocUIDs(newPrimary); - assertThat(primaryIds.size(), equalTo(initialDocs + extraDocs)); - for (IndexShard replica : replicas) { - Set replicaIds = getShardDocUIDs(replica); - Set temp = new HashSet<>(primaryIds); - temp.removeAll(replicaIds); - assertThat(replica.routingEntry() + " is missing docs", temp, empty()); - temp = new HashSet<>(replicaIds); - temp.removeAll(primaryIds); - // yeah, replica has more docs as there is no Lucene roll back on it - assertThat(replica.routingEntry() + " has to have extra docs", temp, - extraDocsToBeTrimmed > 0 ? 
not(empty()) : empty()); - } + shards.assertAllEqual(initialDocs + extraDocs); // check translog on replica is trimmed int translogOperations = 0; diff --git a/server/src/test/java/org/elasticsearch/index/shard/GlobalCheckpointListenersTests.java b/server/src/test/java/org/elasticsearch/index/shard/GlobalCheckpointListenersTests.java index 43b16c6ecc78f..bb3a691a702c8 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/GlobalCheckpointListenersTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/GlobalCheckpointListenersTests.java @@ -336,14 +336,65 @@ public void testNotificationUsesExecutor() { }; final GlobalCheckpointListeners globalCheckpointListeners = new GlobalCheckpointListeners(shardId, executor, logger); globalCheckpointListeners.globalCheckpointUpdated(NO_OPS_PERFORMED); + final long globalCheckpoint = randomLongBetween(NO_OPS_PERFORMED, Long.MAX_VALUE); + final AtomicInteger notified = new AtomicInteger(); final int numberOfListeners = randomIntBetween(0, 16); for (int i = 0; i < numberOfListeners; i++) { - globalCheckpointListeners.add(NO_OPS_PERFORMED, (g, e) -> {}); + globalCheckpointListeners.add(NO_OPS_PERFORMED, (g, e) -> { + notified.incrementAndGet(); + assertThat(g, equalTo(globalCheckpoint)); + assertNull(e); + }); } - globalCheckpointListeners.globalCheckpointUpdated(randomLongBetween(NO_OPS_PERFORMED, Long.MAX_VALUE)); + globalCheckpointListeners.globalCheckpointUpdated(globalCheckpoint); + assertThat(notified.get(), equalTo(numberOfListeners)); assertThat(count.get(), equalTo(numberOfListeners == 0 ? 0 : 1)); } + public void testNotificationOnClosedUsesExecutor() throws IOException { + final AtomicInteger count = new AtomicInteger(); + final Executor executor = command -> { + count.incrementAndGet(); + command.run(); + }; + final GlobalCheckpointListeners globalCheckpointListeners = new GlobalCheckpointListeners(shardId, executor, logger); + globalCheckpointListeners.close(); + final AtomicInteger notified = new AtomicInteger(); + final int numberOfListeners = randomIntBetween(0, 16); + for (int i = 0; i < numberOfListeners; i++) { + globalCheckpointListeners.add(NO_OPS_PERFORMED, (g, e) -> { + notified.incrementAndGet(); + assertThat(g, equalTo(UNASSIGNED_SEQ_NO)); + assertNotNull(e); + assertThat(e.getShardId(), equalTo(shardId)); + }); + } + assertThat(notified.get(), equalTo(numberOfListeners)); + assertThat(count.get(), equalTo(numberOfListeners)); + } + + public void testListenersReadyToBeNotifiedUsesExecutor() { + final AtomicInteger count = new AtomicInteger(); + final Executor executor = command -> { + count.incrementAndGet(); + command.run(); + }; + final GlobalCheckpointListeners globalCheckpointListeners = new GlobalCheckpointListeners(shardId, executor, logger); + final long globalCheckpoint = randomNonNegativeLong(); + globalCheckpointListeners.globalCheckpointUpdated(globalCheckpoint); + final AtomicInteger notified = new AtomicInteger(); + final int numberOfListeners = randomIntBetween(0, 16); + for (int i = 0; i < numberOfListeners; i++) { + globalCheckpointListeners.add(randomLongBetween(0, globalCheckpoint), (g, e) -> { + notified.incrementAndGet(); + assertThat(g, equalTo(globalCheckpoint)); + assertNull(e); + }); + } + assertThat(notified.get(), equalTo(numberOfListeners)); + assertThat(count.get(), equalTo(numberOfListeners)); + } + public void testConcurrency() throws BrokenBarrierException, InterruptedException { final ExecutorService executor = Executors.newFixedThreadPool(randomIntBetween(1, 8)); final 
GlobalCheckpointListeners globalCheckpointListeners = new GlobalCheckpointListeners(shardId, executor, logger); diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 4ed74388f0e1e..0c5d9b1613f32 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -106,6 +106,7 @@ import org.elasticsearch.index.store.StoreStats; import org.elasticsearch.index.translog.TestTranslog; import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.TranslogStats; import org.elasticsearch.index.translog.TranslogTests; import org.elasticsearch.indices.IndicesQueryCache; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -181,6 +182,7 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; /** * Simple unit-test IndexShard related operations. @@ -945,28 +947,25 @@ public void onFailure(Exception e) { resyncLatch.await(); assertThat(indexShard.getLocalCheckpoint(), equalTo(maxSeqNo)); assertThat(indexShard.seqNoStats().getMaxSeqNo(), equalTo(maxSeqNo)); - - closeShards(indexShard); + closeShard(indexShard, false); } - public void testThrowBackLocalCheckpointOnReplica() throws IOException, InterruptedException { + public void testRollbackReplicaEngineOnPromotion() throws IOException, InterruptedException { final IndexShard indexShard = newStartedShard(false); // most of the time this is large enough that most of the time there will be at least one gap final int operations = 1024 - scaledRandomIntBetween(0, 1024); indexOnReplicaWithGaps(indexShard, operations, Math.toIntExact(SequenceNumbers.NO_OPS_PERFORMED)); - final long globalCheckpointOnReplica = - randomIntBetween( - Math.toIntExact(SequenceNumbers.UNASSIGNED_SEQ_NO), - Math.toIntExact(indexShard.getLocalCheckpoint())); + final long globalCheckpointOnReplica = randomLongBetween(SequenceNumbers.UNASSIGNED_SEQ_NO, indexShard.getLocalCheckpoint()); indexShard.updateGlobalCheckpointOnReplica(globalCheckpointOnReplica, "test"); - - final int globalCheckpoint = - randomIntBetween( - Math.toIntExact(SequenceNumbers.UNASSIGNED_SEQ_NO), - Math.toIntExact(indexShard.getLocalCheckpoint())); + final long globalCheckpoint = randomLongBetween(SequenceNumbers.UNASSIGNED_SEQ_NO, indexShard.getLocalCheckpoint()); + Set docsBelowGlobalCheckpoint = getShardDocUIDs(indexShard).stream() + .filter(id -> Long.parseLong(id) <= Math.max(globalCheckpointOnReplica, globalCheckpoint)).collect(Collectors.toSet()); final CountDownLatch latch = new CountDownLatch(1); + final boolean shouldRollback = Math.max(globalCheckpoint, globalCheckpointOnReplica) < indexShard.seqNoStats().getMaxSeqNo() + && indexShard.seqNoStats().getMaxSeqNo() != SequenceNumbers.NO_OPS_PERFORMED; + final Engine beforeRollbackEngine = indexShard.getEngine(); indexShard.acquireReplicaOperationPermit( indexShard.pendingPrimaryTerm + 1, globalCheckpoint, @@ -985,18 +984,21 @@ public void onFailure(final Exception e) { ThreadPool.Names.SAME, ""); latch.await(); - if (globalCheckpointOnReplica == SequenceNumbers.UNASSIGNED_SEQ_NO - && globalCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO) { + if (globalCheckpointOnReplica == SequenceNumbers.UNASSIGNED_SEQ_NO && globalCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO) { 
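+ // with no global checkpoint from either the replica or the promoting primary there is no rollback target, so the local checkpoint stays at NO_OPS_PERFORMED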
assertThat(indexShard.getLocalCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); } else { assertThat(indexShard.getLocalCheckpoint(), equalTo(Math.max(globalCheckpoint, globalCheckpointOnReplica))); } - + assertThat(getShardDocUIDs(indexShard), equalTo(docsBelowGlobalCheckpoint)); + if (shouldRollback) { + assertThat(indexShard.getEngine(), not(sameInstance(beforeRollbackEngine))); + } else { + assertThat(indexShard.getEngine(), sameInstance(beforeRollbackEngine)); + } // ensure that after the local checkpoint throw back and indexing again, the local checkpoint advances final Result result = indexOnReplicaWithGaps(indexShard, operations, Math.toIntExact(indexShard.getLocalCheckpoint())); assertThat(indexShard.getLocalCheckpoint(), equalTo((long) result.localCheckpoint)); - - closeShards(indexShard); + closeShard(indexShard, false); } public void testConcurrentTermIncreaseOnReplicaShard() throws BrokenBarrierException, InterruptedException, IOException { @@ -1200,7 +1202,8 @@ public void testShardStats() throws IOException { public void testShardStatsWithFailures() throws IOException { allowShardFailures(); final ShardId shardId = new ShardId("index", "_na_", 0); - final ShardRouting shardRouting = newShardRouting(shardId, "node", true, RecoverySource.EmptyStoreRecoverySource.INSTANCE, ShardRoutingState.INITIALIZING); + final ShardRouting shardRouting = + newShardRouting(shardId, "node", true, ShardRoutingState.INITIALIZING, RecoverySource.EmptyStoreRecoverySource.INSTANCE); final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir()); @@ -1879,13 +1882,17 @@ public void testRecoverFromStoreRemoveStaleOperations() throws Exception { SourceToParse.source(indexName, "_doc", "doc-1", new BytesArray("{}"), XContentType.JSON)); flushShard(shard); assertThat(getShardDocUIDs(shard), containsInAnyOrder("doc-0", "doc-1")); - // Simulate resync (without rollback): Noop #1, index #2 - acquireReplicaOperationPermitBlockingly(shard, shard.pendingPrimaryTerm + 1); + // Here we try to increase term (i.e. a new primary is promoted) without rolling back a replica so we can keep stale operations + // in the index commit; then verify that a recovery from store (started with the safe commit) will remove all stale operations. 
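+ // bumping the term fields directly is a test-only shortcut: acquiring a replica operation permit with a higher term (as the removed code above did) would now trigger the engine rollback that this test deliberately avoids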
+ shard.pendingPrimaryTerm++; + shard.operationPrimaryTerm++; + shard.getEngine().rollTranslogGeneration(); shard.markSeqNoAsNoop(1, "test"); shard.applyIndexOperationOnReplica(2, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, SourceToParse.source(indexName, "_doc", "doc-2", new BytesArray("{}"), XContentType.JSON)); flushShard(shard); assertThat(getShardDocUIDs(shard), containsInAnyOrder("doc-0", "doc-1", "doc-2")); + closeShard(shard, false); // Recovering from store should discard doc #1 final ShardRouting replicaRouting = shard.routingEntry(); IndexShard newShard = reinitShard(shard, @@ -2248,10 +2255,11 @@ public Translog.Operation next() throws IOException { null)); primary.recoverFromStore(); + primary.recoveryState().getTranslog().totalOperations(snapshot.totalOperations()); + primary.recoveryState().getTranslog().totalOperationsOnStart(snapshot.totalOperations()); primary.state = IndexShardState.RECOVERING; // translog recovery on the next line would otherwise fail as we are in POST_RECOVERY - primary.runTranslogRecovery(primary.getEngine(), snapshot); - assertThat(primary.recoveryState().getTranslog().totalOperationsOnStart(), equalTo(numTotalEntries)); - assertThat(primary.recoveryState().getTranslog().totalOperations(), equalTo(numTotalEntries)); + primary.runTranslogRecovery(primary.getEngine(), snapshot, Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY, + primary.recoveryState().getTranslog()::incrementRecoveredOperations); assertThat(primary.recoveryState().getTranslog().recoveredOperations(), equalTo(numTotalEntries - numCorruptEntries)); closeShards(primary); @@ -2864,6 +2872,9 @@ private Result indexOnReplicaWithGaps( } else { gap = true; } + if (rarely()) { + indexShard.flush(new FlushRequest()); + } } assert localCheckpoint == indexShard.getLocalCheckpoint(); assert !gap || (localCheckpoint != max); @@ -3401,4 +3412,19 @@ public void testSupplyTombstoneDoc() throws Exception { closeShards(shard); } + + public void testResetEngine() throws Exception { + IndexShard shard = newStartedShard(false); + indexOnReplicaWithGaps(shard, between(0, 1000), Math.toIntExact(shard.getLocalCheckpoint())); + final long globalCheckpoint = randomLongBetween(shard.getGlobalCheckpoint(), shard.getLocalCheckpoint()); + shard.updateGlobalCheckpointOnReplica(globalCheckpoint, "test"); + Set docBelowGlobalCheckpoint = getShardDocUIDs(shard).stream() + .filter(id -> Long.parseLong(id) <= globalCheckpoint).collect(Collectors.toSet()); + TranslogStats translogStats = shard.translogStats(); + shard.resetEngineToGlobalCheckpoint(); + assertThat(getShardDocUIDs(shard), equalTo(docBelowGlobalCheckpoint)); + assertThat(shard.seqNoStats().getMaxSeqNo(), equalTo(globalCheckpoint)); + assertThat(shard.translogStats().estimatedNumberOfOperations(), equalTo(translogStats.estimatedNumberOfOperations())); + closeShard(shard, false); + } } diff --git a/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java b/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java index cb93d803bb7c6..8d0f1845be60d 100644 --- a/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java +++ b/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java @@ -103,6 +103,7 @@ protected Collection> nodePlugins() { protected void beforeIndexDeletion() throws Exception { super.beforeIndexDeletion(); assertSeqNos(); + assertSameDocIdsOnShards(); } public void testSimpleRelocationNoIndexing() { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/MultiBucketCollectorTests.java 
b/server/src/test/java/org/elasticsearch/search/aggregations/MultiBucketCollectorTests.java index e3fe39db95246..bc8070d7ae40f 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/MultiBucketCollectorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/MultiBucketCollectorTests.java @@ -24,19 +24,16 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.search.CollectionTerminatedException; -import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.Scorer; -import org.apache.lucene.search.Weight; import org.apache.lucene.store.Directory; import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -44,14 +41,10 @@ import java.util.concurrent.atomic.AtomicBoolean; public class MultiBucketCollectorTests extends ESTestCase { - private static class FakeScorer extends Scorer { + private static class ScoreAndDoc extends Scorable { float score; int doc = -1; - FakeScorer() { - super(null); - } - @Override public int docID() { return doc; @@ -61,26 +54,6 @@ public int docID() { public float score() { return score; } - - @Override - public DocIdSetIterator iterator() { - throw new UnsupportedOperationException(); - } - - @Override - public float getMaxScore(int upTo) throws IOException { - return Float.MAX_VALUE; - } - - @Override - public Weight getWeight() { - throw new UnsupportedOperationException(); - } - - @Override - public Collection getChildren() { - throw new UnsupportedOperationException(); - } } private static class TerminateAfterBucketCollector extends BucketCollector { @@ -171,7 +144,7 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext context) throws IO final LeafBucketCollector leafCollector = in.getLeafCollector(context); return new LeafBucketCollectorBase(leafCollector, null) { @Override - public void setScorer(Scorer scorer) throws IOException { + public void setScorer(Scorable scorer) throws IOException { super.setScorer(scorer); setScorerCalled.set(true); } @@ -235,7 +208,7 @@ public void testSetScorerAfterCollectionTerminated() throws IOException { collector1 = new TerminateAfterBucketCollector(collector1, 1); collector2 = new TerminateAfterBucketCollector(collector2, 2); - Scorer scorer = new FakeScorer(); + Scorable scorer = new ScoreAndDoc(); List collectors = Arrays.asList(collector1, collector2); Collections.shuffle(collectors, random()); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/support/ScriptValuesTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/support/ScriptValuesTests.java index 1915857302cd3..6f2bedbdd3712 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/support/ScriptValuesTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/support/ScriptValuesTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.search.aggregations.support; import com.carrotsearch.randomizedtesting.generators.RandomStrings; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.apache.lucene.util.BytesRef; import org.elasticsearch.script.SearchScript; 
import org.elasticsearch.search.aggregations.support.values.ScriptBytesValues; @@ -59,7 +59,7 @@ public Object run() { } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { } @Override diff --git a/server/src/test/java/org/elasticsearch/search/slice/DocValuesSliceQueryTests.java b/server/src/test/java/org/elasticsearch/search/slice/DocValuesSliceQueryTests.java index 70eb0266eea38..76807f4722afc 100644 --- a/server/src/test/java/org/elasticsearch/search/slice/DocValuesSliceQueryTests.java +++ b/server/src/test/java/org/elasticsearch/search/slice/DocValuesSliceQueryTests.java @@ -27,11 +27,11 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.RandomIndexWriter; -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Collector; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.LeafCollector; -import org.apache.lucene.search.Scorer; import org.apache.lucene.search.QueryUtils; +import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.store.Directory; import org.apache.lucene.util.NumericUtils; @@ -99,7 +99,7 @@ public void testSearch() throws Exception { public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { return new LeafCollector() { @Override - public void setScorer(Scorer scorer) throws IOException { + public void setScorer(Scorable scorer) throws IOException { } @Override diff --git a/server/src/test/java/org/elasticsearch/search/slice/TermsSliceQueryTests.java b/server/src/test/java/org/elasticsearch/search/slice/TermsSliceQueryTests.java index 9ae4b9bc7daf5..881dc6f9587af 100644 --- a/server/src/test/java/org/elasticsearch/search/slice/TermsSliceQueryTests.java +++ b/server/src/test/java/org/elasticsearch/search/slice/TermsSliceQueryTests.java @@ -26,11 +26,11 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.RandomIndexWriter; -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Collector; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.LeafCollector; -import org.apache.lucene.search.Scorer; import org.apache.lucene.search.QueryUtils; +import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; @@ -92,7 +92,7 @@ public void testSearch() throws Exception { public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { return new LeafCollector() { @Override - public void setScorer(Scorer scorer) throws IOException { + public void setScorer(Scorable scorer) throws IOException { } @Override diff --git a/server/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java b/server/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java index 44c49ace5de8f..3c91cda5f86c3 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/ContextCompletionSuggestSearchIT.java @@ -275,7 +275,6 @@ public void testMultiContextFiltering() throws Exception { assertSuggestions("foo", typeFilterSuggest, "suggestion9", "suggestion6", "suggestion5", "suggestion2", "suggestion1"); } - @AwaitsFix(bugUrl = "multiple context boosting is broken, 
as a suggestion, contexts pair is treated as (num(context) entries)") public void testMultiContextBoosting() throws Exception { LinkedHashMap> map = new LinkedHashMap<>(); map.put("cat", ContextBuilder.category("cat").field("cat").build()); @@ -328,7 +327,8 @@ public void testMultiContextBoosting() throws Exception { CategoryQueryContext.builder().setCategory("cat1").build()) ); multiContextBoostSuggest.contexts(contextMap); - assertSuggestions("foo", multiContextBoostSuggest, "suggestion9", "suggestion6", "suggestion5", "suggestion2", "suggestion1"); + // the score of each suggestion is the maximum score among the matching contexts + assertSuggestions("foo", multiContextBoostSuggest, "suggestion9", "suggestion8", "suggestion5", "suggestion6", "suggestion4"); } public void testSeveralContexts() throws Exception { diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java b/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java index c91c04884c5a7..ee270ee6e4803 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java @@ -39,10 +39,6 @@ public static ShardRouting newShardRouting(String index, int shardId, String cur return newShardRouting(new ShardId(index, IndexMetaData.INDEX_UUID_NA_VALUE, shardId), currentNodeId, primary, state); } - public static ShardRouting newShardRouting(ShardId shardId, String currentNodeId, boolean primary, RecoverySource recoverySource, ShardRoutingState state) { - return new ShardRouting(shardId, currentNodeId, null, primary, state, recoverySource, buildUnassignedInfo(state), buildAllocationId(state), -1); - } - public static ShardRouting newShardRouting(ShardId shardId, String currentNodeId, boolean primary, ShardRoutingState state) { return new ShardRouting(shardId, currentNodeId, null, primary, state, buildRecoveryTarget(primary, state), buildUnassignedInfo(state), buildAllocationId(state), -1); } diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/DocIdSeqNoAndTerm.java b/test/framework/src/main/java/org/elasticsearch/index/engine/DocIdSeqNoAndTerm.java new file mode 100644 index 0000000000000..b24a010c1a0d6 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/DocIdSeqNoAndTerm.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.engine; + + +import java.util.Objects; + +/** A tuple of document id, sequence number and primary term of a document */ +public final class DocIdSeqNoAndTerm { + private final String id; + private final long seqNo; + private final long primaryTerm; + + public DocIdSeqNoAndTerm(String id, long seqNo, long primaryTerm) { + this.id = id; + this.seqNo = seqNo; + this.primaryTerm = primaryTerm; + } + + public String getId() { + return id; + } + + public long getSeqNo() { + return seqNo; + } + + public long getPrimaryTerm() { + return primaryTerm; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DocIdSeqNoAndTerm that = (DocIdSeqNoAndTerm) o; + return Objects.equals(id, that.id) && seqNo == that.seqNo && primaryTerm == that.primaryTerm; + } + + @Override + public int hashCode() { + return Objects.hash(id, seqNo, primaryTerm); + } + + @Override + public String toString() { + return "DocIdSeqNoAndTerm{" + "id='" + id + "' seqNo=" + seqNo + " primaryTerm=" + primaryTerm + "}"; + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 283a7b137533d..f9377afe6ed3e 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -33,6 +33,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.LiveIndexWriterConfig; import org.apache.lucene.index.MergePolicy; +import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; @@ -95,11 +96,10 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.Comparator; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; @@ -775,26 +775,41 @@ protected void concurrentlyApplyOps(List ops, InternalEngine e } /** - * Gets all docId from the given engine. + * Gets a collection of tuples of docId, sequence number, and primary term of all live documents in the provided engine.
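(The getDocIds change this javadoc belongs to continues just below.) DocIdSeqNoAndTerm exists so that shard-consistency checks can compare more than bare document ids. A minimal sketch of the intended use, assuming the test-framework classes from this change plus Hamcrest (the helper class and method names are made up):

    import java.util.List;

    import org.elasticsearch.index.engine.DocIdSeqNoAndTerm;

    import static org.hamcrest.MatcherAssert.assertThat;
    import static org.hamcrest.Matchers.equalTo;

    final class ShardParityAssertions {
        // Two shard copies are in sync iff they expose identical
        // (id, seqNo, primaryTerm) tuples. Because getDocIds sorts its result
        // by id, then seqNo, then primaryTerm, plain list equality is a valid
        // comparison regardless of segment layout.
        static void assertSameHistory(List<DocIdSeqNoAndTerm> primaryDocs,
                                      List<DocIdSeqNoAndTerm> replicaDocs) {
            assertThat(replicaDocs, equalTo(primaryDocs));
        }
    }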
*/ - public static Set getDocIds(Engine engine, boolean refresh) throws IOException { + public static List getDocIds(Engine engine, boolean refresh) throws IOException { if (refresh) { engine.refresh("test_get_doc_ids"); } try (Engine.Searcher searcher = engine.acquireSearcher("test_get_doc_ids")) { - Set ids = new HashSet<>(); + List docs = new ArrayList<>(); for (LeafReaderContext leafContext : searcher.reader().leaves()) { LeafReader reader = leafContext.reader(); + NumericDocValues seqNoDocValues = reader.getNumericDocValues(SeqNoFieldMapper.NAME); + NumericDocValues primaryTermDocValues = reader.getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME); Bits liveDocs = reader.getLiveDocs(); for (int i = 0; i < reader.maxDoc(); i++) { if (liveDocs == null || liveDocs.get(i)) { Document uuid = reader.document(i, Collections.singleton(IdFieldMapper.NAME)); BytesRef binaryID = uuid.getBinaryValue(IdFieldMapper.NAME); - ids.add(Uid.decodeId(Arrays.copyOfRange(binaryID.bytes, binaryID.offset, binaryID.offset + binaryID.length))); + String id = Uid.decodeId(Arrays.copyOfRange(binaryID.bytes, binaryID.offset, binaryID.offset + binaryID.length)); + final long primaryTerm; + if (primaryTermDocValues.advanceExact(i)) { + primaryTerm = primaryTermDocValues.longValue(); + } else { + primaryTerm = 0; // non-root documents of a nested document. + } + if (seqNoDocValues.advanceExact(i) == false) { + throw new AssertionError("seqNoDocValues not found for doc[" + i + "] id[" + id + "]"); + } + final long seqNo = seqNoDocValues.longValue(); + docs.add(new DocIdSeqNoAndTerm(id, seqNo, primaryTerm)); } } } - return ids; + docs.sort(Comparator.comparing(DocIdSeqNoAndTerm::getId) + .thenComparingLong(DocIdSeqNoAndTerm::getSeqNo).thenComparingLong(DocIdSeqNoAndTerm::getPrimaryTerm)); + return docs; } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 9082b4153b0bf..a9e715a1129df 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -49,6 +49,7 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.index.cache.IndexCache; import org.elasticsearch.index.cache.query.DisabledQueryCache; +import org.elasticsearch.index.engine.DocIdSeqNoAndTerm; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.EngineTestCase; @@ -82,12 +83,14 @@ import java.util.Collections; import java.util.EnumSet; import java.util.HashSet; +import java.util.List; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.function.BiFunction; import java.util.function.Consumer; +import java.util.stream.Collectors; import static org.elasticsearch.cluster.routing.TestShardRouting.newShardRouting; import static org.hamcrest.Matchers.contains; @@ -451,15 +454,20 @@ protected void closeShards(IndexShard... 
shards) throws IOException { closeShards(Arrays.asList(shards)); } + protected void closeShard(IndexShard shard, boolean assertConsistencyBetweenTranslogAndLucene) throws IOException { + try { + if (assertConsistencyBetweenTranslogAndLucene) { + assertConsistentHistoryBetweenTranslogAndLucene(shard); + } + } finally { + IOUtils.close(() -> shard.close("test", false), shard.store()); + } + } + protected void closeShards(Iterable shards) throws IOException { for (IndexShard shard : shards) { if (shard != null) { - try { - assertConsistentHistoryBetweenTranslogAndLucene(shard); - shard.close("test", false); - } finally { - IOUtils.close(shard.store()); - } + closeShard(shard, true); } } } @@ -635,7 +643,11 @@ private Store.MetadataSnapshot getMetadataSnapshotOrEmpty(IndexShard replica) th return result; } - protected Set getShardDocUIDs(final IndexShard shard) throws IOException { + public static Set getShardDocUIDs(final IndexShard shard) throws IOException { + return getDocIdAndSeqNos(shard).stream().map(DocIdSeqNoAndTerm::getId).collect(Collectors.toSet()); + } + + public static List getDocIdAndSeqNos(final IndexShard shard) throws IOException { return EngineTestCase.getDocIds(shard.getEngine(), true); } @@ -726,7 +738,7 @@ protected void recoverShardFromSnapshot(final IndexShard shard, final IndexId indexId = new IndexId(shardId.getIndex().getName(), shardId.getIndex().getUUID()); final DiscoveryNode node = getFakeDiscoNode(shard.routingEntry().currentNodeId()); final RecoverySource.SnapshotRecoverySource recoverySource = new RecoverySource.SnapshotRecoverySource(snapshot, version, index); - final ShardRouting shardRouting = newShardRouting(shardId, node.getId(), true, recoverySource, ShardRoutingState.INITIALIZING); + final ShardRouting shardRouting = newShardRouting(shardId, node.getId(), true, ShardRoutingState.INITIALIZING, recoverySource); shard.markAsRecovering("from snapshot", new RecoveryState(shardRouting, node, null)); repository.restoreShard(shard, snapshot.getSnapshotId(), version, indexId, shard.shardId(), shard.recoveryState()); diff --git a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java index 71d40a7b86ab6..be77846b2ba34 100644 --- a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java +++ b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java @@ -20,7 +20,7 @@ package org.elasticsearch.script; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Scorable; import org.elasticsearch.index.similarity.ScriptedSimilarity.Doc; import org.elasticsearch.index.similarity.ScriptedSimilarity.Field; import org.elasticsearch.index.similarity.ScriptedSimilarity.Query; @@ -334,7 +334,7 @@ public void setNextVar(String name, Object value) { } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { ctx.put("_score", new ScoreAccessor(scorer)); } @@ -553,7 +553,7 @@ public boolean needs_score() { @Override public ScoreScript newInstance(LeafReaderContext ctx) throws IOException { - Scorer[] scorerHolder = new Scorer[1]; + Scorable[] scorerHolder = new Scorable[1]; return new ScoreScript(params, lookup, ctx) { @Override public double execute() { @@ -566,7 +566,7 @@ public double execute() { } @Override - public void setScorer(Scorer scorer) { + public void setScorer(Scorable scorer) { scorerHolder[0] = scorer; } }; diff --git 
a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 68a862c109d98..52ed2205ab5f5 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -125,6 +125,7 @@ import org.elasticsearch.index.MergeSchedulerConfig; import org.elasticsearch.index.MockEngineFactoryPlugin; import org.elasticsearch.index.codec.CodecService; +import org.elasticsearch.index.engine.DocIdSeqNoAndTerm; import org.elasticsearch.index.engine.Segment; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; @@ -132,6 +133,7 @@ import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesQueryCache; import org.elasticsearch.indices.IndicesRequestCache; @@ -2380,6 +2382,49 @@ protected void assertSeqNos() throws Exception { }); } + + /** + * Asserts that all shards with the same shardId have the same document ids. + */ + public void assertSameDocIdsOnShards() throws Exception { + assertBusy(() -> { + ClusterState state = client().admin().cluster().prepareState().get().getState(); + for (ObjectObjectCursor indexRoutingTable : state.routingTable().indicesRouting()) { + for (IntObjectCursor indexShardRoutingTable : indexRoutingTable.value.shards()) { + ShardRouting primaryShardRouting = indexShardRoutingTable.value.primaryShard(); + if (primaryShardRouting == null || primaryShardRouting.assignedToNode() == false) { + continue; + } + DiscoveryNode primaryNode = state.nodes().get(primaryShardRouting.currentNodeId()); + IndexShard primaryShard = internalCluster().getInstance(IndicesService.class, primaryNode.getName()) + .indexServiceSafe(primaryShardRouting.index()).getShard(primaryShardRouting.id()); + final List docsOnPrimary; + try { + docsOnPrimary = IndexShardTestCase.getDocIdAndSeqNos(primaryShard); + } catch (AlreadyClosedException ex) { + continue; + } + for (ShardRouting replicaShardRouting : indexShardRoutingTable.value.replicaShards()) { + if (replicaShardRouting.assignedToNode() == false) { + continue; + } + DiscoveryNode replicaNode = state.nodes().get(replicaShardRouting.currentNodeId()); + IndexShard replicaShard = internalCluster().getInstance(IndicesService.class, replicaNode.getName()) + .indexServiceSafe(replicaShardRouting.index()).getShard(replicaShardRouting.id()); + final List docsOnReplica; + try { + docsOnReplica = IndexShardTestCase.getDocIdAndSeqNos(replicaShard); + } catch (AlreadyClosedException ex) { + continue; + } + assertThat("out of sync shards: primary=[" + primaryShardRouting + "] num_docs_on_primary=[" + docsOnPrimary.size() + + "] vs replica=[" + replicaShardRouting + "] num_docs_on_replica=[" + docsOnReplica.size() + "]", + docsOnReplica, equalTo(docsOnPrimary)); + } + } + } + }); + } + public static boolean inFipsJvm() { return Security.getProviders()[0].getName().toLowerCase(Locale.ROOT).contains("fips"); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index ecb965040f87b..9d47c4e24a90b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++
b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -236,6 +236,16 @@ protected boolean preserveTemplatesUponCompletion() { return false; } + /** + * Controls whether or not to preserve cluster settings upon completion of the test. The default implementation is to remove all cluster + * settings. + * + * @return true if cluster settings should be preserved and otherwise false + */ + protected boolean preserveClusterSettings() { + return false; + } + /** * Returns whether to preserve the repositories on completion of this test. * Defaults to not preserving repos. See also @@ -295,7 +305,11 @@ private void wipeCluster() throws IOException { } wipeSnapshots(); - wipeClusterSettings(); + + // wipe cluster settings + if (preserveClusterSettings() == false) { + wipeClusterSettings(); + } } /** diff --git a/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java b/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java new file mode 100644 index 0000000000000..7e73e795b8a05 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
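The new preserveClusterSettings() hook follows the same pattern as the existing preserve*UponCompletion() overrides; AbstractFullClusterRestartTestCase, whose new file continues below, is the first consumer. A minimal sketch of such a consumer (the subclass name here is made up):

    import org.elasticsearch.test.rest.ESRestTestCase;

    // Opts a REST test suite out of the settings wipe in wipeCluster(), so
    // cluster-wide settings survive from one test phase to the next.
    public abstract class PreserveSettingsRestTestCase extends ESRestTestCase {
        @Override
        protected boolean preserveClusterSettings() {
            return true;
        }
    }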
+ */ + +package org.elasticsearch.upgrades; + +import org.elasticsearch.Version; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.test.rest.ESRestTestCase; + +public abstract class AbstractFullClusterRestartTestCase extends ESRestTestCase { + + private final boolean runningAgainstOldCluster = Booleans.parseBoolean(System.getProperty("tests.is_old_cluster")); + + public final boolean isRunningAgainstOldCluster() { + return runningAgainstOldCluster; + } + + private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version")); + + public final Version getOldClusterVersion() { + return oldClusterVersion; + } + + @Override + protected boolean preserveIndicesUponCompletion() { + return true; + } + + @Override + protected boolean preserveSnapshotsUponCompletion() { + return true; + } + + @Override + protected boolean preserveReposUponCompletion() { + return true; + } + + @Override + protected boolean preserveTemplatesUponCompletion() { + return true; + } + + @Override + protected boolean preserveClusterSettings() { + return true; + } + +} diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-non-compliant-license/src/test/java/org/elasticsearch/xpack/ccr/CcrMultiClusterLicenseIT.java b/x-pack/plugin/ccr/qa/multi-cluster-with-non-compliant-license/src/test/java/org/elasticsearch/xpack/ccr/CcrMultiClusterLicenseIT.java index 0562a88957ccb..7bc952a3ea8e8 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster-with-non-compliant-license/src/test/java/org/elasticsearch/xpack/ccr/CcrMultiClusterLicenseIT.java +++ b/x-pack/plugin/ccr/qa/multi-cluster-with-non-compliant-license/src/test/java/org/elasticsearch/xpack/ccr/CcrMultiClusterLicenseIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.ccr; +import org.apache.lucene.util.Constants; import org.elasticsearch.client.Request; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.Booleans; @@ -47,6 +48,7 @@ public void testCreateAndFollowIndex() { } public void testAutoFollow() throws Exception { + assumeFalse("windows is the worst", Constants.WINDOWS); if (runningAgainstLeaderCluster == false) { final Request request = new Request("PUT", "/_ccr/auto_follow/leader_cluster"); request.setJsonEntity("{\"leader_index_patterns\":[\"*\"]}"); diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-security/build.gradle b/x-pack/plugin/ccr/qa/multi-cluster-with-security/build.gradle index d4fe9ee554c3d..e2c772d708846 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster-with-security/build.gradle +++ b/x-pack/plugin/ccr/qa/multi-cluster-with-security/build.gradle @@ -47,7 +47,7 @@ followClusterTestCluster { setting 'cluster.remote.leader_cluster.seeds', "\"${-> leaderClusterTest.nodes.get(0).transportUri()}\"" setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.enabled', 'true' - setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.monitoring.collection.enabled', 'true' extraConfigFile 'roles.yml', 'roles.yml' setupCommand 'setupTestAdmin', 'bin/elasticsearch-users', 'useradd', "test_admin", '-p', 'x-pack-test-password', '-r', "superuser" diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java b/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java index c3e41694683f9..e42d8a725de90 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java +++ 
b/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java @@ -31,6 +31,7 @@ import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; public class FollowIndexSecurityIT extends ESRestTestCase { @@ -81,6 +82,7 @@ public void testFollowIndex() throws Exception { createAndFollowIndex("leader_cluster:" + allowedIndex, allowedIndex); assertBusy(() -> verifyDocuments(client(), allowedIndex, numDocs)); assertThat(countCcrNodeTasks(), equalTo(1)); + assertBusy(() -> verifyCcrMonitoring(allowedIndex)); assertOK(client().performRequest(new Request("POST", "/" + allowedIndex + "/_ccr/unfollow"))); // Make sure that there are no other ccr relates operations running: assertBusy(() -> { @@ -269,4 +271,37 @@ private static void unfollowIndex(String followIndex) throws IOException { assertOK(client().performRequest(new Request("POST", "/" + followIndex + "/_ccr/unfollow"))); } + private static void verifyCcrMonitoring(String expectedLeaderIndex) throws IOException { + ensureYellow(".monitoring-*"); + + Request request = new Request("GET", "/.monitoring-*/_search"); + request.setJsonEntity("{\"query\": {\"term\": {\"type\": \"ccr_stats\"}}}"); + Map response = toMap(adminClient().performRequest(request)); + + int numDocs = (int) XContentMapValues.extractValue("hits.total", response); + assertThat(numDocs, greaterThanOrEqualTo(1)); + + int numberOfOperationsReceived = 0; + int numberOfOperationsIndexed = 0; + + List hits = (List) XContentMapValues.extractValue("hits.hits", response); + for (int i = 0; i < numDocs; i++) { + Map hit = (Map) hits.get(i); + String leaderIndex = (String) XContentMapValues.extractValue("_source.ccr_stats.leader_index", hit); + if (leaderIndex.endsWith(expectedLeaderIndex) == false) { + continue; + } + + int foundNumberOfOperationsReceived = + (int) XContentMapValues.extractValue("_source.ccr_stats.operations_received", hit); + numberOfOperationsReceived = Math.max(numberOfOperationsReceived, foundNumberOfOperationsReceived); + int foundNumberOfOperationsIndexed = + (int) XContentMapValues.extractValue("_source.ccr_stats.number_of_operations_indexed", hit); + numberOfOperationsIndexed = Math.max(numberOfOperationsIndexed, foundNumberOfOperationsIndexed); + } + + assertThat(numberOfOperationsReceived, greaterThanOrEqualTo(1)); + assertThat(numberOfOperationsIndexed, greaterThanOrEqualTo(1)); + } + } diff --git a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle index 396c247af40b0..b3b6372384888 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle +++ b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle @@ -27,6 +27,7 @@ followClusterTestCluster { dependsOn leaderClusterTestRunner numNodes = 1 clusterName = 'follow-cluster' + setting 'xpack.monitoring.collection.enabled', 'true' setting 'xpack.license.self_generated.type', 'trial' setting 'cluster.remote.leader_cluster.seeds', "\"${-> leaderClusterTest.nodes.get(0).transportUri()}\"" } diff --git a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java index 76d0e43813594..0e56084e10c54 100644 --- 
a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java +++ b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java @@ -25,6 +25,7 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class FollowIndexIT extends ESRestTestCase { @@ -75,6 +76,7 @@ public void testFollowIndex() throws Exception { index(leaderClient, leaderIndexName, Integer.toString(id + 2), "field", id + 2, "filtered_field", "true"); } assertBusy(() -> verifyDocuments(followIndexName, numDocs + 3)); + assertBusy(() -> verifyCcrMonitoring(leaderIndexName)); } } @@ -104,6 +106,7 @@ public void testAutoFollowPatterns() throws Exception { ensureYellow("logs-20190101"); verifyDocuments("logs-20190101", 5); }); + assertBusy(() -> verifyCcrMonitoring("logs-20190101")); } private static void index(RestClient client, String index, String id, Object... fields) throws IOException { @@ -155,6 +158,39 @@ private static void verifyDocuments(String index, int expectedNumDocs) throws IO } } + private static void verifyCcrMonitoring(String expectedLeaderIndex) throws IOException { + ensureYellow(".monitoring-*"); + + Request request = new Request("GET", "/.monitoring-*/_search"); + request.setJsonEntity("{\"query\": {\"term\": {\"type\": \"ccr_stats\"}}}"); + Map response = toMap(client().performRequest(request)); + + int numDocs = (int) XContentMapValues.extractValue("hits.total", response); + assertThat(numDocs, greaterThanOrEqualTo(1)); + + int numberOfOperationsReceived = 0; + int numberOfOperationsIndexed = 0; + + List hits = (List) XContentMapValues.extractValue("hits.hits", response); + for (int i = 0; i < numDocs; i++) { + Map hit = (Map) hits.get(i); + String leaderIndex = (String) XContentMapValues.extractValue("_source.ccr_stats.leader_index", hit); + if (leaderIndex.endsWith(expectedLeaderIndex) == false) { + continue; + } + + int foundNumberOfOperationsReceived = + (int) XContentMapValues.extractValue("_source.ccr_stats.operations_received", hit); + numberOfOperationsReceived = Math.max(numberOfOperationsReceived, foundNumberOfOperationsReceived); + int foundNumberOfOperationsIndexed = + (int) XContentMapValues.extractValue("_source.ccr_stats.number_of_operations_indexed", hit); + numberOfOperationsIndexed = Math.max(numberOfOperationsIndexed, foundNumberOfOperationsIndexed); + } + + assertThat(numberOfOperationsReceived, greaterThanOrEqualTo(1)); + assertThat(numberOfOperationsIndexed, greaterThanOrEqualTo(1)); + } + private static Map toMap(Response response) throws IOException { return toMap(EntityUtils.toString(response.getEntity())); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java index 353a66db26339..72782f6e0fee1 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java @@ -40,19 +40,17 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.ccr.action.AutoFollowCoordinator; -import org.elasticsearch.xpack.ccr.action.CcrStatsAction; -import org.elasticsearch.xpack.ccr.action.CreateAndFollowIndexAction; import org.elasticsearch.xpack.ccr.action.DeleteAutoFollowPatternAction; -import 
org.elasticsearch.xpack.ccr.action.FollowIndexAction; import org.elasticsearch.xpack.ccr.action.PutAutoFollowPatternAction; import org.elasticsearch.xpack.ccr.action.ShardChangesAction; -import org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask; import org.elasticsearch.xpack.ccr.action.ShardFollowTask; import org.elasticsearch.xpack.ccr.action.ShardFollowTasksExecutor; import org.elasticsearch.xpack.ccr.action.TransportCcrStatsAction; +import org.elasticsearch.xpack.ccr.action.TransportCreateAndFollowIndexAction; import org.elasticsearch.xpack.ccr.action.TransportDeleteAutoFollowPatternAction; +import org.elasticsearch.xpack.ccr.action.TransportFollowIndexAction; import org.elasticsearch.xpack.ccr.action.TransportPutAutoFollowPatternAction; -import org.elasticsearch.xpack.ccr.action.UnfollowIndexAction; +import org.elasticsearch.xpack.ccr.action.TransportUnfollowIndexAction; import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsAction; import org.elasticsearch.xpack.ccr.action.bulk.TransportBulkShardOperationsAction; import org.elasticsearch.xpack.ccr.index.engine.FollowingEngineFactory; @@ -63,6 +61,11 @@ import org.elasticsearch.xpack.ccr.rest.RestPutAutoFollowPatternAction; import org.elasticsearch.xpack.ccr.rest.RestUnfollowIndexAction; import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; +import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; +import org.elasticsearch.xpack.core.ccr.action.CreateAndFollowIndexAction; +import org.elasticsearch.xpack.core.ccr.action.FollowIndexAction; +import org.elasticsearch.xpack.core.ccr.action.UnfollowIndexAction; import java.util.Arrays; import java.util.Collection; @@ -73,8 +76,8 @@ import java.util.function.Supplier; import static java.util.Collections.emptyList; -import static org.elasticsearch.xpack.ccr.CcrSettings.CCR_ENABLED_SETTING; import static org.elasticsearch.xpack.ccr.CcrSettings.CCR_FOLLOWING_INDEX_SETTING; +import static org.elasticsearch.xpack.core.XPackSettings.CCR_ENABLED_SETTING; /** * Container class for CCR functionality. 
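For the Ccr.java hunks around this point: the request/response types and Action singletons move to x-pack core, while the plugin keeps only the transport implementations and binds the two together. A sketch of that binding in the shape of ActionPlugin#getActions(), using classes named in this diff (the holder class CcrActionBindings is illustrative):

    import java.util.Arrays;
    import java.util.List;

    import org.elasticsearch.action.ActionRequest;
    import org.elasticsearch.action.ActionResponse;
    import org.elasticsearch.plugins.ActionPlugin.ActionHandler;
    import org.elasticsearch.xpack.ccr.action.TransportFollowIndexAction;
    import org.elasticsearch.xpack.ccr.action.TransportUnfollowIndexAction;
    import org.elasticsearch.xpack.core.ccr.action.FollowIndexAction;
    import org.elasticsearch.xpack.core.ccr.action.UnfollowIndexAction;

    final class CcrActionBindings {
        // Action definitions come from x-pack core; only the transport
        // implementations remain inside the CCR plugin.
        static List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> handlers() {
            return Arrays.asList(
                new ActionHandler<>(FollowIndexAction.INSTANCE, TransportFollowIndexAction.class),
                new ActionHandler<>(UnfollowIndexAction.INSTANCE, TransportUnfollowIndexAction.class));
        }
    }

The point of the split is that other x-pack modules can refer to CCR actions without depending on the CCR plugin itself.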
@@ -148,9 +151,9 @@ public List> getPersistentTasksExecutor(ClusterServic // stats action new ActionHandler<>(CcrStatsAction.INSTANCE, TransportCcrStatsAction.class), // follow actions - new ActionHandler<>(CreateAndFollowIndexAction.INSTANCE, CreateAndFollowIndexAction.TransportAction.class), - new ActionHandler<>(FollowIndexAction.INSTANCE, FollowIndexAction.TransportAction.class), - new ActionHandler<>(UnfollowIndexAction.INSTANCE, UnfollowIndexAction.TransportAction.class), + new ActionHandler<>(CreateAndFollowIndexAction.INSTANCE, TransportCreateAndFollowIndexAction.class), + new ActionHandler<>(FollowIndexAction.INSTANCE, TransportFollowIndexAction.class), + new ActionHandler<>(UnfollowIndexAction.INSTANCE, TransportUnfollowIndexAction.class), // auto-follow actions new ActionHandler<>(DeleteAutoFollowPatternAction.INSTANCE, TransportDeleteAutoFollowPatternAction.class), new ActionHandler<>(PutAutoFollowPatternAction.INSTANCE, TransportPutAutoFollowPatternAction.class)); @@ -160,6 +163,10 @@ public List getRestHandlers(Settings settings, RestController restC IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter, IndexNameExpressionResolver indexNameExpressionResolver, Supplier nodesInCluster) { + if (enabled == false) { + return emptyList(); + } + return Arrays.asList( // stats API new RestCcrStatsAction(settings, restController), @@ -179,8 +186,8 @@ public List getNamedWriteables() { ShardFollowTask::new), // Task statuses - new NamedWriteableRegistry.Entry(Task.Status.class, ShardFollowNodeTask.Status.STATUS_PARSER_NAME, - ShardFollowNodeTask.Status::new) + new NamedWriteableRegistry.Entry(Task.Status.class, ShardFollowNodeTaskStatus.STATUS_PARSER_NAME, + ShardFollowNodeTaskStatus::new) ); } @@ -192,9 +199,9 @@ public List getNamedXContent() { // Task statuses new NamedXContentRegistry.Entry( - ShardFollowNodeTask.Status.class, - new ParseField(ShardFollowNodeTask.Status.STATUS_PARSER_NAME), - ShardFollowNodeTask.Status::fromXContent)); + ShardFollowNodeTaskStatus.class, + new ParseField(ShardFollowNodeTaskStatus.STATUS_PARSER_NAME), + ShardFollowNodeTaskStatus::fromXContent)); } /** @@ -225,10 +232,7 @@ public List> getExecutorBuilders(Settings settings) { return Collections.emptyList(); } - FixedExecutorBuilder ccrTp = new FixedExecutorBuilder(settings, CCR_THREAD_POOL_NAME, - 32, 100, "xpack.ccr.ccr_thread_pool"); - - return Collections.singletonList(ccrTp); + return Collections.singletonList(new FixedExecutorBuilder(settings, CCR_THREAD_POOL_NAME, 32, 100, "xpack.ccr.ccr_thread_pool")); } protected XPackLicenseState getLicenseState() { return XPackPlugin.getSharedLicenseState(); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java index a942990ea5a74..122f5a913d216 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java @@ -8,6 +8,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.XPackSettings; import java.util.Arrays; import java.util.List; @@ -22,11 +23,6 @@ private CcrSettings() { } - /** - * Setting for controlling whether or not CCR is enabled. 
- */ - static final Setting CCR_ENABLED_SETTING = Setting.boolSetting("xpack.ccr.enabled", true, Property.NodeScope); - /** * Index setting for a following index. */ @@ -46,7 +42,7 @@ private CcrSettings() { */ static List> getSettings() { return Arrays.asList( - CCR_ENABLED_SETTING, + XPackSettings.CCR_ENABLED_SETTING, CCR_FOLLOWING_INDEX_SETTING, CCR_AUTO_FOLLOW_POLL_INTERVAL); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java index 0781110678a8a..04d3e070816d8 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java @@ -27,6 +27,8 @@ import org.elasticsearch.xpack.ccr.CcrSettings; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern; +import org.elasticsearch.xpack.core.ccr.action.CreateAndFollowIndexAction; +import org.elasticsearch.xpack.core.ccr.action.FollowIndexAction; import java.util.ArrayList; import java.util.HashMap; @@ -70,12 +72,6 @@ public AutoFollowCoordinator( } private void doAutoFollow() { - if (ccrLicenseChecker.isCcrAllowed() == false) { - // TODO: set non-compliant status on auto-follow coordination that can be viewed via a stats API - LOGGER.warn("skipping auto-follower coordination", LicenseUtils.newComplianceException("ccr")); - threadPool.schedule(pollInterval, ThreadPool.Names.SAME, this::doAutoFollow); - return; - } if (localNodeMaster == false) { return; } @@ -91,6 +87,13 @@ private void doAutoFollow() { return; } + if (ccrLicenseChecker.isCcrAllowed() == false) { + // TODO: set non-compliant status on auto-follow coordination that can be viewed via a stats API + LOGGER.warn("skipping auto-follower coordination", LicenseUtils.newComplianceException("ccr")); + threadPool.schedule(pollInterval, ThreadPool.Names.SAME, this::doAutoFollow); + return; + } + Consumer handler = e -> { if (e != null) { LOGGER.warn("failure occurred during auto-follower coordination", e); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexAction.java deleted file mode 100644 index 223f6ed8e6d25..0000000000000 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexAction.java +++ /dev/null @@ -1,367 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -package org.elasticsearch.xpack.ccr.action; - -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.elasticsearch.ResourceAlreadyExistsException; -import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.IndicesRequest; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.ActiveShardCount; -import org.elasticsearch.action.support.ActiveShardsObserver; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.master.AcknowledgedRequest; -import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.AckedClusterStateUpdateTask; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.MappingMetaData; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.routing.RoutingTable; -import org.elasticsearch.cluster.routing.allocation.AllocationService; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.license.LicenseUtils; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.RemoteClusterAware; -import org.elasticsearch.transport.RemoteClusterService; -import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.ccr.CcrLicenseChecker; -import org.elasticsearch.xpack.ccr.CcrSettings; - -import java.io.IOException; -import java.util.List; -import java.util.Map; -import java.util.Objects; - -public class CreateAndFollowIndexAction extends Action { - - public static final CreateAndFollowIndexAction INSTANCE = new CreateAndFollowIndexAction(); - public static final String NAME = "indices:admin/xpack/ccr/create_and_follow_index"; - - private CreateAndFollowIndexAction() { - super(NAME); - } - - @Override - public Response newResponse() { - return new Response(); - } - - public static class Request extends AcknowledgedRequest implements IndicesRequest { - - private FollowIndexAction.Request followRequest; - - public Request(FollowIndexAction.Request followRequest) { - this.followRequest = Objects.requireNonNull(followRequest); - } - - Request() { - } - - public FollowIndexAction.Request getFollowRequest() { - return followRequest; - } - - @Override - public ActionRequestValidationException validate() { - return followRequest.validate(); - } - - @Override - public String[] indices() { - return new String[]{followRequest.getFollowerIndex()}; - } - - @Override - public IndicesOptions indicesOptions() { - return IndicesOptions.strictSingleIndexNoExpandForbidClosed(); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - 
followRequest = new FollowIndexAction.Request(); - followRequest.readFrom(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - followRequest.writeTo(out); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - Request request = (Request) o; - return Objects.equals(followRequest, request.followRequest); - } - - @Override - public int hashCode() { - return Objects.hash(followRequest); - } - } - - public static class Response extends ActionResponse implements ToXContentObject { - - private boolean followIndexCreated; - private boolean followIndexShardsAcked; - private boolean indexFollowingStarted; - - Response() { - } - - Response(boolean followIndexCreated, boolean followIndexShardsAcked, boolean indexFollowingStarted) { - this.followIndexCreated = followIndexCreated; - this.followIndexShardsAcked = followIndexShardsAcked; - this.indexFollowingStarted = indexFollowingStarted; - } - - public boolean isFollowIndexCreated() { - return followIndexCreated; - } - - public boolean isFollowIndexShardsAcked() { - return followIndexShardsAcked; - } - - public boolean isIndexFollowingStarted() { - return indexFollowingStarted; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - followIndexCreated = in.readBoolean(); - followIndexShardsAcked = in.readBoolean(); - indexFollowingStarted = in.readBoolean(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeBoolean(followIndexCreated); - out.writeBoolean(followIndexShardsAcked); - out.writeBoolean(indexFollowingStarted); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - { - builder.field("follow_index_created", followIndexCreated); - builder.field("follow_index_shards_acked", followIndexShardsAcked); - builder.field("index_following_started", indexFollowingStarted); - } - builder.endObject(); - return builder; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - Response response = (Response) o; - return followIndexCreated == response.followIndexCreated && - followIndexShardsAcked == response.followIndexShardsAcked && - indexFollowingStarted == response.indexFollowingStarted; - } - - @Override - public int hashCode() { - return Objects.hash(followIndexCreated, followIndexShardsAcked, indexFollowingStarted); - } - } - - public static class TransportAction extends TransportMasterNodeAction { - - private final Client client; - private final AllocationService allocationService; - private final RemoteClusterService remoteClusterService; - private final ActiveShardsObserver activeShardsObserver; - private final CcrLicenseChecker ccrLicenseChecker; - - @Inject - public TransportAction( - final Settings settings, - final ThreadPool threadPool, - final TransportService transportService, - final ClusterService clusterService, - final ActionFilters actionFilters, - final IndexNameExpressionResolver indexNameExpressionResolver, - final Client client, - final AllocationService allocationService, - final CcrLicenseChecker ccrLicenseChecker) { - super(settings, NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, Request::new); - this.client = client; - this.allocationService = 
allocationService; - this.remoteClusterService = transportService.getRemoteClusterService(); - this.activeShardsObserver = new ActiveShardsObserver(settings, clusterService, threadPool); - this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker); - } - - @Override - protected String executor() { - return ThreadPool.Names.SAME; - } - - @Override - protected Response newResponse() { - return new Response(); - } - - @Override - protected void masterOperation( - final Request request, final ClusterState state, final ActionListener listener) throws Exception { - if (ccrLicenseChecker.isCcrAllowed() == false) { - listener.onFailure(LicenseUtils.newComplianceException("ccr")); - return; - } - final String[] indices = new String[]{request.getFollowRequest().getLeaderIndex()}; - final Map> remoteClusterIndices = remoteClusterService.groupClusterIndices(indices, s -> false); - if (remoteClusterIndices.containsKey(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { - createFollowerIndexAndFollowLocalIndex(request, state, listener); - } else { - assert remoteClusterIndices.size() == 1; - final Map.Entry> entry = remoteClusterIndices.entrySet().iterator().next(); - assert entry.getValue().size() == 1; - final String clusterAlias = entry.getKey(); - final String leaderIndex = entry.getValue().get(0); - createFollowerIndexAndFollowRemoteIndex(request, clusterAlias, leaderIndex, listener); - } - } - - private void createFollowerIndexAndFollowLocalIndex( - final Request request, final ClusterState state, final ActionListener listener) { - // following an index in local cluster, so use local cluster state to fetch leader index metadata - final IndexMetaData leaderIndexMetadata = state.getMetaData().index(request.getFollowRequest().getLeaderIndex()); - createFollowerIndex(leaderIndexMetadata, request, listener); - } - - private void createFollowerIndexAndFollowRemoteIndex( - final Request request, - final String clusterAlias, - final String leaderIndex, - final ActionListener listener) { - ccrLicenseChecker.checkRemoteClusterLicenseAndFetchLeaderIndexMetadata( - client, - clusterAlias, - leaderIndex, - listener::onFailure, - leaderIndexMetaData -> createFollowerIndex(leaderIndexMetaData, request, listener)); - } - - private void createFollowerIndex( - final IndexMetaData leaderIndexMetaData, final Request request, final ActionListener listener) { - if (leaderIndexMetaData == null) { - listener.onFailure(new IllegalArgumentException("leader index [" + request.getFollowRequest().getLeaderIndex() + - "] does not exist")); - return; - } - - ActionListener handler = ActionListener.wrap( - result -> { - if (result) { - initiateFollowing(request, listener); - } else { - listener.onResponse(new Response(true, false, false)); - } - }, - listener::onFailure); - // Can't use create index api here, because then index templates can alter the mappings / settings. - // And index templates could introduce settings / mappings that are incompatible with the leader index. 
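(The removed createFollowerIndex body continues below.) The deleted follower-index creation is built on the standard acked cluster-state update pattern that its comments allude to. A stripped-down, self-contained sketch of just that pattern, under illustrative names:

    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.action.support.master.AcknowledgedRequest;
    import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.cluster.service.ClusterService;

    final class ClusterStateUpdates {
        // execute() computes the new cluster state on the master; newResponse()
        // turns the acknowledgement flag into the caller's response type once
        // the state change has been published and acked.
        static <R extends AcknowledgedRequest<R>> void submit(
                ClusterService clusterService, R request, ActionListener<Boolean> listener) {
            clusterService.submitStateUpdateTask("example_update",
                new AckedClusterStateUpdateTask<Boolean>(request, listener) {
                    @Override
                    protected Boolean newResponse(boolean acknowledged) {
                        return acknowledged;
                    }

                    @Override
                    public ClusterState execute(ClusterState currentState) {
                        // A real task returns a modified state; the deleted code
                        // builds the follower IndexMetaData and reroutes here.
                        return currentState;
                    }
                });
        }
    }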
- clusterService.submitStateUpdateTask("follow_index_action", new AckedClusterStateUpdateTask(request, handler) { - - @Override - protected Boolean newResponse(boolean acknowledged) { - return acknowledged; - } - - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - String followIndex = request.getFollowRequest().getFollowerIndex(); - IndexMetaData currentIndex = currentState.metaData().index(followIndex); - if (currentIndex != null) { - throw new ResourceAlreadyExistsException(currentIndex.getIndex()); - } - - MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); - IndexMetaData.Builder imdBuilder = IndexMetaData.builder(followIndex); - - // Copy all settings, but overwrite a few settings. - Settings.Builder settingsBuilder = Settings.builder(); - settingsBuilder.put(leaderIndexMetaData.getSettings()); - // Overwriting UUID here, because otherwise we can't follow indices in the same cluster - settingsBuilder.put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()); - settingsBuilder.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, followIndex); - settingsBuilder.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); - imdBuilder.settings(settingsBuilder); - - // Copy mappings from leader IMD to follow IMD - for (ObjectObjectCursor cursor : leaderIndexMetaData.getMappings()) { - imdBuilder.putMapping(cursor.value); - } - imdBuilder.setRoutingNumShards(leaderIndexMetaData.getRoutingNumShards()); - IndexMetaData followIMD = imdBuilder.build(); - mdBuilder.put(followIMD, false); - - ClusterState.Builder builder = ClusterState.builder(currentState); - builder.metaData(mdBuilder.build()); - ClusterState updatedState = builder.build(); - - RoutingTable.Builder routingTableBuilder = RoutingTable.builder(updatedState.routingTable()) - .addAsNew(updatedState.metaData().index(request.getFollowRequest().getFollowerIndex())); - updatedState = allocationService.reroute( - ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build(), - "follow index [" + request.getFollowRequest().getFollowerIndex() + "] created"); - - logger.info("[{}] creating index, cause [ccr_create_and_follow], shards [{}]/[{}]", - followIndex, followIMD.getNumberOfShards(), followIMD.getNumberOfReplicas()); - - return updatedState; - } - }); - } - - private void initiateFollowing(Request request, ActionListener listener) { - activeShardsObserver.waitForActiveShards(new String[]{request.followRequest.getFollowerIndex()}, - ActiveShardCount.DEFAULT, request.timeout(), result -> { - if (result) { - client.execute(FollowIndexAction.INSTANCE, request.getFollowRequest(), ActionListener.wrap( - r -> listener.onResponse(new Response(true, true, r.isAcknowledged())), - listener::onFailure - )); - } else { - listener.onResponse(new Response(true, false, false)); - } - }, listener::onFailure); - } - - @Override - protected ClusterBlockException checkBlock(Request request, ClusterState state) { - return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, request.getFollowRequest().getFollowerIndex()); - } - - } - -} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/FollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/FollowIndexAction.java deleted file mode 100644 index 352456fd03913..0000000000000 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/FollowIndexAction.java +++ /dev/null @@ -1,573 +0,0 @@ -/* - * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -package org.elasticsearch.xpack.ccr.action; - -import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; -import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ObjectParser; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.IndexingSlowLog; -import org.elasticsearch.index.SearchSlowLog; -import org.elasticsearch.index.cache.bitset.BitsetFilterCache; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.IndicesRequestCache; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.license.LicenseUtils; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.persistent.PersistentTasksService; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.RemoteClusterAware; -import org.elasticsearch.transport.RemoteClusterService; -import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.ccr.CcrLicenseChecker; -import org.elasticsearch.xpack.ccr.CcrSettings; - -import java.io.IOException; -import java.util.Collections; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReferenceArray; -import java.util.stream.Collectors; - -public class FollowIndexAction extends Action { - - public static final FollowIndexAction INSTANCE = new FollowIndexAction(); - public static final String NAME = "cluster:admin/xpack/ccr/follow_index"; - - private FollowIndexAction() { - super(NAME); - } - - @Override - public AcknowledgedResponse newResponse() { - return new AcknowledgedResponse(); - } - - public static class Request extends ActionRequest implements ToXContentObject { - - private static final ParseField LEADER_INDEX_FIELD = new ParseField("leader_index"); - private static 
final ParseField FOLLOWER_INDEX_FIELD = new ParseField("follower_index"); - private static final ConstructingObjectParser<Request, String> PARSER = new ConstructingObjectParser<>(NAME, true, - (args, followerIndex) -> { - if (args[1] != null) { - followerIndex = (String) args[1]; - } - return new Request((String) args[0], followerIndex, (Integer) args[2], (Integer) args[3], (Long) args[4], - (Integer) args[5], (Integer) args[6], (TimeValue) args[7], (TimeValue) args[8]); - }); - - static { - PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), LEADER_INDEX_FIELD); - PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), FOLLOWER_INDEX_FIELD); - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), ShardFollowTask.MAX_BATCH_OPERATION_COUNT); - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), ShardFollowTask.MAX_CONCURRENT_READ_BATCHES); - PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), ShardFollowTask.MAX_BATCH_SIZE_IN_BYTES); - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), ShardFollowTask.MAX_CONCURRENT_WRITE_BATCHES); - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), ShardFollowTask.MAX_WRITE_BUFFER_SIZE); - PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> TimeValue.parseTimeValue(p.text(), ShardFollowTask.RETRY_TIMEOUT.getPreferredName()), - ShardFollowTask.RETRY_TIMEOUT, ObjectParser.ValueType.STRING); - PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> TimeValue.parseTimeValue(p.text(), ShardFollowTask.IDLE_SHARD_RETRY_DELAY.getPreferredName()), - ShardFollowTask.IDLE_SHARD_RETRY_DELAY, ObjectParser.ValueType.STRING); - } - - public static Request fromXContent(XContentParser parser, String followerIndex) throws IOException { - Request request = PARSER.parse(parser, followerIndex); - if (followerIndex != null) { - if (request.followerIndex == null) { - request.followerIndex = followerIndex; - } else { - if (request.followerIndex.equals(followerIndex) == false) { - throw new IllegalArgumentException("provided follower_index [" + followerIndex + - "] does not match the follower_index from the request body [" + request.followerIndex + "]"); - } - } - } - return request; - } - - private String leaderIndex; - private String followerIndex; - private int maxBatchOperationCount; - private int maxConcurrentReadBatches; - private long maxOperationSizeInBytes; - private int maxConcurrentWriteBatches; - private int maxWriteBufferSize; - private TimeValue retryTimeout; - private TimeValue idleShardRetryDelay; - - public Request( - String leaderIndex, - String followerIndex, - Integer maxBatchOperationCount, - Integer maxConcurrentReadBatches, - Long maxOperationSizeInBytes, - Integer maxConcurrentWriteBatches, - Integer maxWriteBufferSize, - TimeValue retryTimeout, - TimeValue idleShardRetryDelay) { - - if (leaderIndex == null) { - throw new IllegalArgumentException("leader_index is missing"); - } - if (followerIndex == null) { - throw new IllegalArgumentException("follower_index is missing"); - } - if (maxBatchOperationCount == null) { - maxBatchOperationCount = ShardFollowNodeTask.DEFAULT_MAX_BATCH_OPERATION_COUNT; - } - if (maxConcurrentReadBatches == null) { - maxConcurrentReadBatches = ShardFollowNodeTask.DEFAULT_MAX_CONCURRENT_READ_BATCHES; - } - if (maxOperationSizeInBytes == null) { - maxOperationSizeInBytes = ShardFollowNodeTask.DEFAULT_MAX_BATCH_SIZE_IN_BYTES; - } - if (maxConcurrentWriteBatches == null) { - maxConcurrentWriteBatches = ShardFollowNodeTask.DEFAULT_MAX_CONCURRENT_WRITE_BATCHES; - } - if (maxWriteBufferSize == null) { - maxWriteBufferSize = ShardFollowNodeTask.DEFAULT_MAX_WRITE_BUFFER_SIZE; - } - if (retryTimeout == null) { - retryTimeout = ShardFollowNodeTask.DEFAULT_RETRY_TIMEOUT; - } - if (idleShardRetryDelay == null) { - idleShardRetryDelay = ShardFollowNodeTask.DEFAULT_IDLE_SHARD_RETRY_DELAY; - } - - if (maxBatchOperationCount < 1) { - throw new IllegalArgumentException("max_batch_operation_count must be larger than 0"); - } - if (maxConcurrentReadBatches < 1) { - throw new IllegalArgumentException("max_concurrent_read_batches must be larger than 0"); - } - if (maxOperationSizeInBytes <= 0) { - throw new IllegalArgumentException("max_batch_size_in_bytes must be larger than 0"); - } - if (maxConcurrentWriteBatches < 1) { - throw new IllegalArgumentException("max_concurrent_write_batches must be larger than 0"); - } - if (maxWriteBufferSize < 1) { - throw new IllegalArgumentException("max_write_buffer_size must be larger than 0"); - } - - this.leaderIndex = leaderIndex; - this.followerIndex = followerIndex; - this.maxBatchOperationCount = maxBatchOperationCount; - this.maxConcurrentReadBatches = maxConcurrentReadBatches; - this.maxOperationSizeInBytes = maxOperationSizeInBytes; - this.maxConcurrentWriteBatches = maxConcurrentWriteBatches; - this.maxWriteBufferSize = maxWriteBufferSize; - this.retryTimeout = retryTimeout; - this.idleShardRetryDelay = idleShardRetryDelay; - } - - Request() { - } - - public String getLeaderIndex() { - return leaderIndex; - } - - public String getFollowerIndex() { - return followerIndex; - } - - public int getMaxBatchOperationCount() { - return maxBatchOperationCount; - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - leaderIndex = in.readString(); - followerIndex = in.readString(); - maxBatchOperationCount = in.readVInt(); - maxConcurrentReadBatches = in.readVInt(); - maxOperationSizeInBytes = in.readVLong(); - maxConcurrentWriteBatches = in.readVInt(); - maxWriteBufferSize = in.readVInt(); - retryTimeout = in.readOptionalTimeValue(); - idleShardRetryDelay = in.readOptionalTimeValue(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(leaderIndex); - out.writeString(followerIndex); - out.writeVInt(maxBatchOperationCount); - out.writeVInt(maxConcurrentReadBatches); - out.writeVLong(maxOperationSizeInBytes); - out.writeVInt(maxConcurrentWriteBatches); - out.writeVInt(maxWriteBufferSize); - out.writeOptionalTimeValue(retryTimeout); - out.writeOptionalTimeValue(idleShardRetryDelay); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - { - builder.field(LEADER_INDEX_FIELD.getPreferredName(), leaderIndex); - builder.field(FOLLOWER_INDEX_FIELD.getPreferredName(), followerIndex); - builder.field(ShardFollowTask.MAX_BATCH_OPERATION_COUNT.getPreferredName(), maxBatchOperationCount); - builder.field(ShardFollowTask.MAX_BATCH_SIZE_IN_BYTES.getPreferredName(), maxOperationSizeInBytes); - builder.field(ShardFollowTask.MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize); - builder.field(ShardFollowTask.MAX_CONCURRENT_READ_BATCHES.getPreferredName(), maxConcurrentReadBatches); - builder.field(ShardFollowTask.MAX_CONCURRENT_WRITE_BATCHES.getPreferredName(), maxConcurrentWriteBatches); - builder.field(ShardFollowTask.RETRY_TIMEOUT.getPreferredName(), 
retryTimeout.getStringRep()); - builder.field(ShardFollowTask.IDLE_SHARD_RETRY_DELAY.getPreferredName(), idleShardRetryDelay.getStringRep()); - } - builder.endObject(); - return builder; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - Request request = (Request) o; - return maxBatchOperationCount == request.maxBatchOperationCount && - maxConcurrentReadBatches == request.maxConcurrentReadBatches && - maxOperationSizeInBytes == request.maxOperationSizeInBytes && - maxConcurrentWriteBatches == request.maxConcurrentWriteBatches && - maxWriteBufferSize == request.maxWriteBufferSize && - Objects.equals(retryTimeout, request.retryTimeout) && - Objects.equals(idleShardRetryDelay, request.idleShardRetryDelay) && - Objects.equals(leaderIndex, request.leaderIndex) && - Objects.equals(followerIndex, request.followerIndex); - } - - @Override - public int hashCode() { - return Objects.hash( - leaderIndex, - followerIndex, - maxBatchOperationCount, - maxConcurrentReadBatches, - maxOperationSizeInBytes, - maxConcurrentWriteBatches, - maxWriteBufferSize, - retryTimeout, - idleShardRetryDelay - ); - } - } - - public static class TransportAction extends HandledTransportAction { - - private final Client client; - private final ThreadPool threadPool; - private final ClusterService clusterService; - private final RemoteClusterService remoteClusterService; - private final PersistentTasksService persistentTasksService; - private final IndicesService indicesService; - private final CcrLicenseChecker ccrLicenseChecker; - - @Inject - public TransportAction( - final Settings settings, - final ThreadPool threadPool, - final TransportService transportService, - final ActionFilters actionFilters, - final Client client, - final ClusterService clusterService, - final PersistentTasksService persistentTasksService, - final IndicesService indicesService, - final CcrLicenseChecker ccrLicenseChecker) { - super(settings, NAME, transportService, actionFilters, Request::new); - this.client = client; - this.threadPool = threadPool; - this.clusterService = clusterService; - this.remoteClusterService = transportService.getRemoteClusterService(); - this.persistentTasksService = persistentTasksService; - this.indicesService = indicesService; - this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker); - } - - @Override - protected void doExecute(final Task task, - final Request request, - final ActionListener listener) { - if (ccrLicenseChecker.isCcrAllowed() == false) { - listener.onFailure(LicenseUtils.newComplianceException("ccr")); - return; - } - final String[] indices = new String[]{request.leaderIndex}; - final Map> remoteClusterIndices = remoteClusterService.groupClusterIndices(indices, s -> false); - if (remoteClusterIndices.containsKey(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { - followLocalIndex(request, listener); - } else { - assert remoteClusterIndices.size() == 1; - final Map.Entry> entry = remoteClusterIndices.entrySet().iterator().next(); - assert entry.getValue().size() == 1; - final String clusterAlias = entry.getKey(); - final String leaderIndex = entry.getValue().get(0); - followRemoteIndex(request, clusterAlias, leaderIndex, listener); - } - } - - private void followLocalIndex(final Request request, - final ActionListener listener) { - final ClusterState state = clusterService.state(); - final IndexMetaData followerIndexMetadata = state.getMetaData().index(request.getFollowerIndex()); - // following an 
index in the local cluster, so use local cluster state to fetch leader index metadata - final IndexMetaData leaderIndexMetadata = state.getMetaData().index(request.getLeaderIndex()); - try { - start(request, null, leaderIndexMetadata, followerIndexMetadata, listener); - } catch (final IOException e) { - listener.onFailure(e); - } - } - - private void followRemoteIndex( - final Request request, - final String clusterAlias, - final String leaderIndex, - final ActionListener<AcknowledgedResponse> listener) { - final ClusterState state = clusterService.state(); - final IndexMetaData followerIndexMetadata = state.getMetaData().index(request.getFollowerIndex()); - ccrLicenseChecker.checkRemoteClusterLicenseAndFetchLeaderIndexMetadata( - client, - clusterAlias, - leaderIndex, - listener::onFailure, - leaderIndexMetadata -> { - try { - start(request, clusterAlias, leaderIndexMetadata, followerIndexMetadata, listener); - } catch (final IOException e) { - listener.onFailure(e); - } - }); - } - - /** - * Performs validation on the provided leader and follower {@link IndexMetaData} instances and then - * creates a persistent task for each leader primary shard. Each persistent task tracks changes in its leader - * shard and replicates them to the corresponding follower shard. - * - * Currently the following validation is performed: - * <ul> - * <li>the leader and follower index exist and are open</li> - * <li>the leader index has soft deletes enabled</li> - * <li>the leader and follower index have the same number of primary shards and routing shards</li> - * <li>the follower index is marked as a following index and its remaining settings are identical to the leader's</li> - * <li>the follower mapping can be merged with the leader mapping</li> - * </ul>
- */ - void start( - Request request, - String clusterNameAlias, - IndexMetaData leaderIndexMetadata, - IndexMetaData followIndexMetadata, - ActionListener handler) throws IOException { - - MapperService mapperService = followIndexMetadata != null ? indicesService.createIndexMapperService(followIndexMetadata) : null; - validate(request, leaderIndexMetadata, followIndexMetadata, mapperService); - final int numShards = followIndexMetadata.getNumberOfShards(); - final AtomicInteger counter = new AtomicInteger(numShards); - final AtomicReferenceArray responses = new AtomicReferenceArray<>(followIndexMetadata.getNumberOfShards()); - Map filteredHeaders = threadPool.getThreadContext().getHeaders().entrySet().stream() - .filter(e -> ShardFollowTask.HEADER_FILTERS.contains(e.getKey())) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - - for (int i = 0; i < numShards; i++) { - final int shardId = i; - String taskId = followIndexMetadata.getIndexUUID() + "-" + shardId; - - ShardFollowTask shardFollowTask = new ShardFollowTask(clusterNameAlias, - new ShardId(followIndexMetadata.getIndex(), shardId), - new ShardId(leaderIndexMetadata.getIndex(), shardId), - request.maxBatchOperationCount, request.maxConcurrentReadBatches, request.maxOperationSizeInBytes, - request.maxConcurrentWriteBatches, request.maxWriteBufferSize, request.retryTimeout, - request.idleShardRetryDelay, filteredHeaders); - persistentTasksService.sendStartRequest(taskId, ShardFollowTask.NAME, shardFollowTask, - new ActionListener>() { - @Override - public void onResponse(PersistentTasksCustomMetaData.PersistentTask task) { - responses.set(shardId, task); - finalizeResponse(); - } - - @Override - public void onFailure(Exception e) { - responses.set(shardId, e); - finalizeResponse(); - } - - void finalizeResponse() { - Exception error = null; - if (counter.decrementAndGet() == 0) { - for (int j = 0; j < responses.length(); j++) { - Object response = responses.get(j); - if (response instanceof Exception) { - if (error == null) { - error = (Exception) response; - } else { - error.addSuppressed((Throwable) response); - } - } - } - - if (error == null) { - // include task ids? 
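
The start() method above fans out one persistent-task start request per shard and joins the callbacks with an AtomicInteger countdown over an AtomicReferenceArray of per-slot results, promoting the first failure to the primary error and attaching the rest as suppressed. A minimal standalone sketch of that join pattern, with illustrative class and values that are not part of this PR:

import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReferenceArray;

public class FanInDemo {

    public static void main(String[] args) {
        int numShards = 3;
        AtomicInteger counter = new AtomicInteger(numShards);
        AtomicReferenceArray<Object> responses = new AtomicReferenceArray<>(numShards);

        // Simulate per-shard completions arriving in any order; slot 1 fails.
        complete(responses, counter, 0, "task-0");
        complete(responses, counter, 1, new RuntimeException("shard 1 failed"));
        complete(responses, counter, 2, "task-2");
    }

    static void complete(AtomicReferenceArray<Object> responses, AtomicInteger counter, int slot, Object result) {
        responses.set(slot, result);
        if (counter.decrementAndGet() == 0) { // the last responder finalizes, exactly once
            Exception error = null;
            for (int j = 0; j < responses.length(); j++) {
                Object response = responses.get(j);
                if (response instanceof Exception) {
                    if (error == null) {
                        error = (Exception) response; // first failure becomes the primary error
                    } else {
                        error.addSuppressed((Exception) response); // later failures are attached as suppressed
                    }
                }
            }
            System.out.println(error == null ? "acknowledged" : "failed: " + error);
        }
    }
}
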
- handler.onResponse(new AcknowledgedResponse(true)); - } else { - // TODO: cancel all started tasks - handler.onFailure(error); - } - } - } - } - ); - } - } - } - - private static final Set> WHITELISTED_SETTINGS; - - static { - Set> whiteListedSettings = new HashSet<>(); - whiteListedSettings.add(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING); - whiteListedSettings.add(IndexMetaData.INDEX_AUTO_EXPAND_REPLICAS_SETTING); - - whiteListedSettings.add(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING); - whiteListedSettings.add(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING); - whiteListedSettings.add(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING); - whiteListedSettings.add(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING); - whiteListedSettings.add(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING); - whiteListedSettings.add(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING); - - whiteListedSettings.add(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING); - whiteListedSettings.add(IndexSettings.MAX_RESULT_WINDOW_SETTING); - whiteListedSettings.add(IndexSettings.INDEX_WARMER_ENABLED_SETTING); - whiteListedSettings.add(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING); - whiteListedSettings.add(IndexSettings.MAX_RESCORE_WINDOW_SETTING); - whiteListedSettings.add(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING); - whiteListedSettings.add(IndexSettings.DEFAULT_FIELD_SETTING); - whiteListedSettings.add(IndexSettings.QUERY_STRING_LENIENT_SETTING); - whiteListedSettings.add(IndexSettings.QUERY_STRING_ANALYZE_WILDCARD); - whiteListedSettings.add(IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD); - whiteListedSettings.add(IndexSettings.ALLOW_UNMAPPED); - whiteListedSettings.add(IndexSettings.INDEX_SEARCH_IDLE_AFTER); - whiteListedSettings.add(BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING); - - whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING); - whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING); - whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING); - whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING); - whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING); - whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING); - whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING); - whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING); - whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL); - whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING); - whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING); - whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING); - whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING); - whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING); - whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING); - whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG_SETTING); - - whiteListedSettings.add(IndexSettings.INDEX_SOFT_DELETES_SETTING); - whiteListedSettings.add(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING); - - WHITELISTED_SETTINGS = 
Collections.unmodifiableSet(whiteListedSettings); - } - - static void validate(Request request, - IndexMetaData leaderIndex, - IndexMetaData followIndex, MapperService followerMapperService) { - if (leaderIndex == null) { - throw new IllegalArgumentException("leader index [" + request.leaderIndex + "] does not exist"); - } - if (followIndex == null) { - throw new IllegalArgumentException("follow index [" + request.followerIndex + "] does not exist"); - } - if (leaderIndex.getSettings().getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) == false) { - throw new IllegalArgumentException("leader index [" + request.leaderIndex + "] does not have soft deletes enabled"); - } - if (leaderIndex.getNumberOfShards() != followIndex.getNumberOfShards()) { - throw new IllegalArgumentException("leader index primary shards [" + leaderIndex.getNumberOfShards() + - "] does not match the number of primary shards of the follow index [" + followIndex.getNumberOfShards() + "]"); - } - if (leaderIndex.getRoutingNumShards() != followIndex.getRoutingNumShards()) { - throw new IllegalArgumentException("leader index number_of_routing_shards [" + leaderIndex.getRoutingNumShards() + - "] does not match the number_of_routing_shards of the follow index [" + followIndex.getRoutingNumShards() + "]"); - } - if (leaderIndex.getState() != IndexMetaData.State.OPEN || followIndex.getState() != IndexMetaData.State.OPEN) { - throw new IllegalArgumentException("leader and follow index must be open"); - } - if (CcrSettings.CCR_FOLLOWING_INDEX_SETTING.get(followIndex.getSettings()) == false) { - throw new IllegalArgumentException("the following index [" + request.followerIndex + "] is not ready " + - "to follow; the setting [" + CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey() + "] must be enabled."); - } - // Make a copy, remove settings that are allowed to be different, and then compare whether the settings are equal. - Settings leaderSettings = filter(leaderIndex.getSettings()); - Settings followerSettings = filter(followIndex.getSettings()); - if (leaderSettings.equals(followerSettings) == false) { - throw new IllegalArgumentException("the leader and follower index settings must be identical"); - } - - // Validates whether the current follower mapping is mergeable with the leader mapping. 
- // This also validates for example whether specific mapper plugins have been installed - followerMapperService.merge(leaderIndex, MapperService.MergeReason.MAPPING_RECOVERY); - } - - private static Settings filter(Settings originalSettings) { - Settings.Builder settings = Settings.builder().put(originalSettings); - // Remove settings that are always going to be different between leader and follow index: - settings.remove(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey()); - settings.remove(IndexMetaData.SETTING_INDEX_UUID); - settings.remove(IndexMetaData.SETTING_INDEX_PROVIDED_NAME); - settings.remove(IndexMetaData.SETTING_CREATION_DATE); - - Iterator iterator = settings.keys().iterator(); - while (iterator.hasNext()) { - String key = iterator.next(); - for (Setting whitelistedSetting : WHITELISTED_SETTINGS) { - if (whitelistedSetting.match(key)) { - iterator.remove(); - break; - } - } - } - return settings.build(); - } - -} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternAction.java index a01fd8e3bc209..eb23244722d0a 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternAction.java @@ -56,9 +56,9 @@ public static class Request extends AcknowledgedRequest implements ToXC PARSER.declareLong(Request::setMaxOperationSizeInBytes, AutoFollowPattern.MAX_BATCH_SIZE_IN_BYTES); PARSER.declareInt(Request::setMaxConcurrentWriteBatches, AutoFollowPattern.MAX_CONCURRENT_WRITE_BATCHES); PARSER.declareInt(Request::setMaxWriteBufferSize, AutoFollowPattern.MAX_WRITE_BUFFER_SIZE); - PARSER.declareField(Request::setRetryTimeout, + PARSER.declareField(Request::setMaxRetryDelay, (p, c) -> TimeValue.parseTimeValue(p.text(), AutoFollowPattern.RETRY_TIMEOUT.getPreferredName()), - ShardFollowTask.RETRY_TIMEOUT, ObjectParser.ValueType.STRING); + ShardFollowTask.MAX_RETRY_DELAY, ObjectParser.ValueType.STRING); PARSER.declareField(Request::setIdleShardRetryDelay, (p, c) -> TimeValue.parseTimeValue(p.text(), AutoFollowPattern.IDLE_SHARD_RETRY_DELAY.getPreferredName()), ShardFollowTask.IDLE_SHARD_RETRY_DELAY, ObjectParser.ValueType.STRING); @@ -87,7 +87,7 @@ public static Request fromXContent(XContentParser parser, String remoteClusterAl private Long maxOperationSizeInBytes; private Integer maxConcurrentWriteBatches; private Integer maxWriteBufferSize; - private TimeValue retryTimeout; + private TimeValue maxRetryDelay; private TimeValue idleShardRetryDelay; @Override @@ -166,12 +166,12 @@ public void setMaxWriteBufferSize(Integer maxWriteBufferSize) { this.maxWriteBufferSize = maxWriteBufferSize; } - public TimeValue getRetryTimeout() { - return retryTimeout; + public TimeValue getMaxRetryDelay() { + return maxRetryDelay; } - public void setRetryTimeout(TimeValue retryTimeout) { - this.retryTimeout = retryTimeout; + public void setMaxRetryDelay(TimeValue maxRetryDelay) { + this.maxRetryDelay = maxRetryDelay; } public TimeValue getIdleShardRetryDelay() { @@ -193,7 +193,7 @@ public void readFrom(StreamInput in) throws IOException { maxOperationSizeInBytes = in.readOptionalLong(); maxConcurrentWriteBatches = in.readOptionalVInt(); maxWriteBufferSize = in.readOptionalVInt(); - retryTimeout = in.readOptionalTimeValue(); + maxRetryDelay = in.readOptionalTimeValue(); idleShardRetryDelay = in.readOptionalTimeValue(); } @@ 
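
The settings check in FollowIndexAction.validate() above relies on the filter() helper: copy both indices' settings, strip keys that always differ plus everything matched by WHITELISTED_SETTINGS, and require the remainder to be identical. A simplified sketch of that filter-and-compare idea over plain maps; the keys and the two-entry whitelist here are illustrative stand-ins, not the real setting registry:

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class SettingsFilterDemo {

    // Keys that always differ between leader and follower, mirroring filter() above.
    private static final Set<String> ALWAYS_DIFFERENT =
            Set.of("index.uuid", "index.provided_name", "index.creation_date");
    // Tiny stand-in for the WHITELISTED_SETTINGS that are allowed to diverge.
    private static final Set<String> WHITELISTED =
            Set.of("index.number_of_replicas", "index.refresh_interval");

    static Map<String, String> filter(Map<String, String> settings) {
        Map<String, String> copy = new HashMap<>(settings);
        copy.keySet().removeAll(ALWAYS_DIFFERENT);
        copy.keySet().removeAll(WHITELISTED);
        return copy;
    }

    public static void main(String[] args) {
        Map<String, String> leader = Map.of("index.uuid", "abc", "index.number_of_replicas", "1");
        Map<String, String> follower = Map.of("index.uuid", "xyz", "index.number_of_replicas", "0");
        // Identical after filtering, so a validate()-style check would accept this pair.
        System.out.println(filter(leader).equals(filter(follower))); // true
    }
}
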
-208,7 +208,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalLong(maxOperationSizeInBytes); out.writeOptionalVInt(maxConcurrentWriteBatches); out.writeOptionalVInt(maxWriteBufferSize); - out.writeOptionalTimeValue(retryTimeout); + out.writeOptionalTimeValue(maxRetryDelay); out.writeOptionalTimeValue(idleShardRetryDelay); } @@ -236,8 +236,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (maxConcurrentWriteBatches != null) { builder.field(ShardFollowTask.MAX_CONCURRENT_WRITE_BATCHES.getPreferredName(), maxConcurrentWriteBatches); } - if (retryTimeout != null) { - builder.field(ShardFollowTask.RETRY_TIMEOUT.getPreferredName(), retryTimeout.getStringRep()); + if (maxRetryDelay != null) { + builder.field(ShardFollowTask.MAX_RETRY_DELAY.getPreferredName(), maxRetryDelay.getStringRep()); } if (idleShardRetryDelay != null) { builder.field(ShardFollowTask.IDLE_SHARD_RETRY_DELAY.getPreferredName(), idleShardRetryDelay.getStringRep()); @@ -260,7 +260,7 @@ public boolean equals(Object o) { Objects.equals(maxOperationSizeInBytes, request.maxOperationSizeInBytes) && Objects.equals(maxConcurrentWriteBatches, request.maxConcurrentWriteBatches) && Objects.equals(maxWriteBufferSize, request.maxWriteBufferSize) && - Objects.equals(retryTimeout, request.retryTimeout) && + Objects.equals(maxRetryDelay, request.maxRetryDelay) && Objects.equals(idleShardRetryDelay, request.idleShardRetryDelay); } @@ -275,7 +275,7 @@ public int hashCode() { maxOperationSizeInBytes, maxConcurrentWriteBatches, maxWriteBufferSize, - retryTimeout, + maxRetryDelay, idleShardRetryDelay ); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java index d102c6b5b7af8..b6f82783a56ab 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java @@ -29,6 +29,7 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.ccr.action.FollowIndexAction; import java.io.IOException; import java.util.ArrayList; @@ -57,7 +58,7 @@ public static class Request extends SingleShardRequest { private long fromSeqNo; private int maxOperationCount; private ShardId shardId; - private long maxOperationSizeInBytes = ShardFollowNodeTask.DEFAULT_MAX_BATCH_SIZE_IN_BYTES; + private long maxOperationSizeInBytes = FollowIndexAction.DEFAULT_MAX_BATCH_SIZE_IN_BYTES; public Request(ShardId shardId) { super(shardId.getIndexName()); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java index 00e3aaaae2a8e..c221c097977a3 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java @@ -10,35 +10,23 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.TransportActions; -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import 
org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.transport.NetworkExceptionHelper; -import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.persistent.AllocatedPersistentTask; -import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsResponse; +import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; -import java.io.IOException; -import java.util.AbstractMap; import java.util.ArrayList; import java.util.Arrays; import java.util.Comparator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import java.util.NavigableMap; -import java.util.Objects; import java.util.PriorityQueue; import java.util.Queue; import java.util.TreeMap; @@ -48,7 +36,6 @@ import java.util.function.Consumer; import java.util.function.LongConsumer; import java.util.function.LongSupplier; -import java.util.stream.Collectors; /** * The node task that fetch the write operations from a leader shard and @@ -56,20 +43,12 @@ */ public abstract class ShardFollowNodeTask extends AllocatedPersistentTask { - public static final int DEFAULT_MAX_BATCH_OPERATION_COUNT = 1024; - public static final int DEFAULT_MAX_CONCURRENT_READ_BATCHES = 1; - public static final int DEFAULT_MAX_CONCURRENT_WRITE_BATCHES = 1; - public static final int DEFAULT_MAX_WRITE_BUFFER_SIZE = 10240; - public static final long DEFAULT_MAX_BATCH_SIZE_IN_BYTES = Long.MAX_VALUE; - private static final int RETRY_LIMIT = 10; - public static final TimeValue DEFAULT_RETRY_TIMEOUT = new TimeValue(500); - public static final TimeValue DEFAULT_IDLE_SHARD_RETRY_DELAY = TimeValue.timeValueSeconds(10); - + private static final int DELAY_MILLIS = 50; private static final Logger LOGGER = Loggers.getLogger(ShardFollowNodeTask.class); private final String leaderIndex; private final ShardFollowTask params; - private final TimeValue retryTimeout; + private final TimeValue maxRetryDelay; private final TimeValue idleShardChangesRequestDelay; private final BiConsumer scheduler; private final LongSupplier relativeTimeProvider; @@ -101,7 +80,7 @@ public abstract class ShardFollowNodeTask extends AllocatedPersistentTask { this.params = params; this.scheduler = scheduler; this.relativeTimeProvider = relativeTimeProvider; - this.retryTimeout = params.getRetryTimeout(); + this.maxRetryDelay = params.getMaxRetryDelay(); this.idleShardChangesRequestDelay = params.getIdleShardRetryDelay(); /* * We keep track of the most recent fetch exceptions, with the number of exceptions that we track equal to the maximum number of @@ -379,20 +358,28 @@ private void updateMapping(LongConsumer handler, AtomicInteger retryCounter) { private void handleFailure(Exception e, AtomicInteger retryCounter, Runnable task) { assert e != null; - if (shouldRetry(e)) { - if (isStopped() == false && retryCounter.incrementAndGet() <= RETRY_LIMIT) { - LOGGER.debug(new ParameterizedMessage("{} error during follow shard task, retrying...", params.getFollowShardId()), e); - 
scheduler.accept(retryTimeout, task); - } else { - markAsFailed(new ElasticsearchException("retrying failed [" + retryCounter.get() + - "] times, aborting...", e)); - } + if (shouldRetry(e) && isStopped() == false) { + int currentRetry = retryCounter.incrementAndGet(); + LOGGER.debug(new ParameterizedMessage("{} error during follow shard task, retrying [{}]", + params.getFollowShardId(), currentRetry), e); + long delay = computeDelay(currentRetry, maxRetryDelay.getMillis()); + scheduler.accept(TimeValue.timeValueMillis(delay), task); } else { markAsFailed(e); } } - private boolean shouldRetry(Exception e) { + static long computeDelay(int currentRetry, long maxRetryDelayInMillis) { + // Cap currentRetry to avoid overflow when computing n variable + int maxCurrentRetry = Math.min(currentRetry, 24); + long n = Math.round(Math.pow(2, maxCurrentRetry - 1)); + // + 1 here, because nextInt(...) bound is exclusive and otherwise the first delay would always be zero. + int k = Randomness.get().nextInt(Math.toIntExact(n + 1)); + int backOffDelay = k * DELAY_MILLIS; + return Math.min(backOffDelay, maxRetryDelayInMillis); + } + + private static boolean shouldRetry(Exception e) { return NetworkExceptionHelper.isConnectException(e) || NetworkExceptionHelper.isCloseConnectionException(e) || TransportActions.isShardNotAvailableException(e); @@ -421,7 +408,7 @@ public ShardId getFollowShardId() { } @Override - public synchronized Status getStatus() { + public synchronized ShardFollowNodeTaskStatus getStatus() { final long timeSinceLastFetchMillis; if (lastFetchTime != -1) { timeSinceLastFetchMillis = TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - lastFetchTime); @@ -429,7 +416,7 @@ public synchronized Status getStatus() { // To avoid confusion when ccr didn't yet execute a fetch: timeSinceLastFetchMillis = -1; } - return new Status( + return new ShardFollowNodeTaskStatus( leaderIndex, getFollowShardId().getId(), leaderGlobalCheckpoint, @@ -454,476 +441,4 @@ public synchronized Status getStatus() { timeSinceLastFetchMillis); } - public static class Status implements Task.Status { - - public static final String STATUS_PARSER_NAME = "shard-follow-node-task-status"; - - static final ParseField LEADER_INDEX = new ParseField("leader_index"); - static final ParseField SHARD_ID = new ParseField("shard_id"); - static final ParseField LEADER_GLOBAL_CHECKPOINT_FIELD = new ParseField("leader_global_checkpoint"); - static final ParseField LEADER_MAX_SEQ_NO_FIELD = new ParseField("leader_max_seq_no"); - static final ParseField FOLLOWER_GLOBAL_CHECKPOINT_FIELD = new ParseField("follower_global_checkpoint"); - static final ParseField FOLLOWER_MAX_SEQ_NO_FIELD = new ParseField("follower_max_seq_no"); - static final ParseField LAST_REQUESTED_SEQ_NO_FIELD = new ParseField("last_requested_seq_no"); - static final ParseField NUMBER_OF_CONCURRENT_READS_FIELD = new ParseField("number_of_concurrent_reads"); - static final ParseField NUMBER_OF_CONCURRENT_WRITES_FIELD = new ParseField("number_of_concurrent_writes"); - static final ParseField NUMBER_OF_QUEUED_WRITES_FIELD = new ParseField("number_of_queued_writes"); - static final ParseField MAPPING_VERSION_FIELD = new ParseField("mapping_version"); - static final ParseField TOTAL_FETCH_TIME_MILLIS_FIELD = new ParseField("total_fetch_time_millis"); - static final ParseField NUMBER_OF_SUCCESSFUL_FETCHES_FIELD = new ParseField("number_of_successful_fetches"); - static final ParseField NUMBER_OF_FAILED_FETCHES_FIELD = new ParseField("number_of_failed_fetches"); - 
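
The new handleFailure/computeDelay logic above replaces the fixed retryTimeout with randomized exponential backoff: on retry n, a multiplier k is drawn uniformly from [0, 2^(n-1)] and scaled by a 50 ms base delay, capped at max_retry_delay. A runnable sketch of the same computation; java.util.Random stands in for the Randomness helper, and the surrounding class is illustrative:

import java.util.Random;

public class BackoffDemo {

    private static final int DELAY_MILLIS = 50;
    private static final Random RANDOM = new Random(); // stand-in for org.elasticsearch.common.Randomness

    // Mirrors computeDelay above: full jitter over an exponentially growing window, capped at max_retry_delay.
    static long computeDelay(int currentRetry, long maxRetryDelayInMillis) {
        int maxCurrentRetry = Math.min(currentRetry, 24);      // cap the exponent to avoid overflow
        long n = Math.round(Math.pow(2, maxCurrentRetry - 1)); // window doubles with each retry
        int k = RANDOM.nextInt(Math.toIntExact(n + 1));        // k in [0, n]; +1 because the bound is exclusive
        return Math.min((long) k * DELAY_MILLIS, maxRetryDelayInMillis);
    }

    public static void main(String[] args) {
        for (int retry = 1; retry <= 8; retry++) {
            System.out.printf("retry %d -> %d ms%n", retry, computeDelay(retry, 500));
        }
    }
}
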
static final ParseField OPERATIONS_RECEIVED_FIELD = new ParseField("operations_received"); - static final ParseField TOTAL_TRANSFERRED_BYTES = new ParseField("total_transferred_bytes"); - static final ParseField TOTAL_INDEX_TIME_MILLIS_FIELD = new ParseField("total_index_time_millis"); - static final ParseField NUMBER_OF_SUCCESSFUL_BULK_OPERATIONS_FIELD = new ParseField("number_of_successful_bulk_operations"); - static final ParseField NUMBER_OF_FAILED_BULK_OPERATIONS_FIELD = new ParseField("number_of_failed_bulk_operations"); - static final ParseField NUMBER_OF_OPERATIONS_INDEXED_FIELD = new ParseField("number_of_operations_indexed"); - static final ParseField FETCH_EXCEPTIONS = new ParseField("fetch_exceptions"); - static final ParseField TIME_SINCE_LAST_FETCH_MILLIS_FIELD = new ParseField("time_since_last_fetch_millis"); - - @SuppressWarnings("unchecked") - static final ConstructingObjectParser STATUS_PARSER = new ConstructingObjectParser<>(STATUS_PARSER_NAME, - args -> new Status( - (String) args[0], - (int) args[1], - (long) args[2], - (long) args[3], - (long) args[4], - (long) args[5], - (long) args[6], - (int) args[7], - (int) args[8], - (int) args[9], - (long) args[10], - (long) args[11], - (long) args[12], - (long) args[13], - (long) args[14], - (long) args[15], - (long) args[16], - (long) args[17], - (long) args[18], - (long) args[19], - new TreeMap<>( - ((List>) args[20]) - .stream() - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))), - (long) args[21])); - - public static final String FETCH_EXCEPTIONS_ENTRY_PARSER_NAME = "shard-follow-node-task-status-fetch-exceptions-entry"; - - static final ConstructingObjectParser, Void> FETCH_EXCEPTIONS_ENTRY_PARSER = - new ConstructingObjectParser<>( - FETCH_EXCEPTIONS_ENTRY_PARSER_NAME, - args -> new AbstractMap.SimpleEntry<>((long) args[0], (ElasticsearchException) args[1])); - - static { - STATUS_PARSER.declareString(ConstructingObjectParser.constructorArg(), LEADER_INDEX); - STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), SHARD_ID); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), LEADER_GLOBAL_CHECKPOINT_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), LEADER_MAX_SEQ_NO_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), FOLLOWER_GLOBAL_CHECKPOINT_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), FOLLOWER_MAX_SEQ_NO_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), LAST_REQUESTED_SEQ_NO_FIELD); - STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_CONCURRENT_READS_FIELD); - STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_CONCURRENT_WRITES_FIELD); - STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_QUEUED_WRITES_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), MAPPING_VERSION_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_FETCH_TIME_MILLIS_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_SUCCESSFUL_FETCHES_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_FAILED_FETCHES_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), OPERATIONS_RECEIVED_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_TRANSFERRED_BYTES); - 
STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_INDEX_TIME_MILLIS_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_SUCCESSFUL_BULK_OPERATIONS_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_FAILED_BULK_OPERATIONS_FIELD); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_OPERATIONS_INDEXED_FIELD); - STATUS_PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), FETCH_EXCEPTIONS_ENTRY_PARSER, FETCH_EXCEPTIONS); - STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TIME_SINCE_LAST_FETCH_MILLIS_FIELD); - } - - static final ParseField FETCH_EXCEPTIONS_ENTRY_FROM_SEQ_NO = new ParseField("from_seq_no"); - static final ParseField FETCH_EXCEPTIONS_ENTRY_EXCEPTION = new ParseField("exception"); - - static { - FETCH_EXCEPTIONS_ENTRY_PARSER.declareLong(ConstructingObjectParser.constructorArg(), FETCH_EXCEPTIONS_ENTRY_FROM_SEQ_NO); - FETCH_EXCEPTIONS_ENTRY_PARSER.declareObject( - ConstructingObjectParser.constructorArg(), - (p, c) -> ElasticsearchException.fromXContent(p), - FETCH_EXCEPTIONS_ENTRY_EXCEPTION); - } - - private final String leaderIndex; - - public String leaderIndex() { - return leaderIndex; - } - - private final int shardId; - - public int getShardId() { - return shardId; - } - - private final long leaderGlobalCheckpoint; - - public long leaderGlobalCheckpoint() { - return leaderGlobalCheckpoint; - } - - private final long leaderMaxSeqNo; - - public long leaderMaxSeqNo() { - return leaderMaxSeqNo; - } - - private final long followerGlobalCheckpoint; - - public long followerGlobalCheckpoint() { - return followerGlobalCheckpoint; - } - - private final long followerMaxSeqNo; - - public long followerMaxSeqNo() { - return followerMaxSeqNo; - } - - private final long lastRequestedSeqNo; - - public long lastRequestedSeqNo() { - return lastRequestedSeqNo; - } - - private final int numberOfConcurrentReads; - - public int numberOfConcurrentReads() { - return numberOfConcurrentReads; - } - - private final int numberOfConcurrentWrites; - - public int numberOfConcurrentWrites() { - return numberOfConcurrentWrites; - } - - private final int numberOfQueuedWrites; - - public int numberOfQueuedWrites() { - return numberOfQueuedWrites; - } - - private final long mappingVersion; - - public long mappingVersion() { - return mappingVersion; - } - - private final long totalFetchTimeMillis; - - public long totalFetchTimeMillis() { - return totalFetchTimeMillis; - } - - private final long numberOfSuccessfulFetches; - - public long numberOfSuccessfulFetches() { - return numberOfSuccessfulFetches; - } - - private final long numberOfFailedFetches; - - public long numberOfFailedFetches() { - return numberOfFailedFetches; - } - - private final long operationsReceived; - - public long operationsReceived() { - return operationsReceived; - } - - private final long totalTransferredBytes; - - public long totalTransferredBytes() { - return totalTransferredBytes; - } - - private final long totalIndexTimeMillis; - - public long totalIndexTimeMillis() { - return totalIndexTimeMillis; - } - - private final long numberOfSuccessfulBulkOperations; - - public long numberOfSuccessfulBulkOperations() { - return numberOfSuccessfulBulkOperations; - } - - private final long numberOfFailedBulkOperations; - - public long numberOfFailedBulkOperations() { - return numberOfFailedBulkOperations; - } - - private final long numberOfOperationsIndexed; - - public long 
numberOfOperationsIndexed() { - return numberOfOperationsIndexed; - } - - private final NavigableMap fetchExceptions; - - public NavigableMap fetchExceptions() { - return fetchExceptions; - } - - private final long timeSinceLastFetchMillis; - - public long timeSinceLastFetchMillis() { - return timeSinceLastFetchMillis; - } - - Status( - final String leaderIndex, - final int shardId, - final long leaderGlobalCheckpoint, - final long leaderMaxSeqNo, - final long followerGlobalCheckpoint, - final long followerMaxSeqNo, - final long lastRequestedSeqNo, - final int numberOfConcurrentReads, - final int numberOfConcurrentWrites, - final int numberOfQueuedWrites, - final long mappingVersion, - final long totalFetchTimeMillis, - final long numberOfSuccessfulFetches, - final long numberOfFailedFetches, - final long operationsReceived, - final long totalTransferredBytes, - final long totalIndexTimeMillis, - final long numberOfSuccessfulBulkOperations, - final long numberOfFailedBulkOperations, - final long numberOfOperationsIndexed, - final NavigableMap fetchExceptions, - final long timeSinceLastFetchMillis) { - this.leaderIndex = leaderIndex; - this.shardId = shardId; - this.leaderGlobalCheckpoint = leaderGlobalCheckpoint; - this.leaderMaxSeqNo = leaderMaxSeqNo; - this.followerGlobalCheckpoint = followerGlobalCheckpoint; - this.followerMaxSeqNo = followerMaxSeqNo; - this.lastRequestedSeqNo = lastRequestedSeqNo; - this.numberOfConcurrentReads = numberOfConcurrentReads; - this.numberOfConcurrentWrites = numberOfConcurrentWrites; - this.numberOfQueuedWrites = numberOfQueuedWrites; - this.mappingVersion = mappingVersion; - this.totalFetchTimeMillis = totalFetchTimeMillis; - this.numberOfSuccessfulFetches = numberOfSuccessfulFetches; - this.numberOfFailedFetches = numberOfFailedFetches; - this.operationsReceived = operationsReceived; - this.totalTransferredBytes = totalTransferredBytes; - this.totalIndexTimeMillis = totalIndexTimeMillis; - this.numberOfSuccessfulBulkOperations = numberOfSuccessfulBulkOperations; - this.numberOfFailedBulkOperations = numberOfFailedBulkOperations; - this.numberOfOperationsIndexed = numberOfOperationsIndexed; - this.fetchExceptions = Objects.requireNonNull(fetchExceptions); - this.timeSinceLastFetchMillis = timeSinceLastFetchMillis; - } - - public Status(final StreamInput in) throws IOException { - this.leaderIndex = in.readString(); - this.shardId = in.readVInt(); - this.leaderGlobalCheckpoint = in.readZLong(); - this.leaderMaxSeqNo = in.readZLong(); - this.followerGlobalCheckpoint = in.readZLong(); - this.followerMaxSeqNo = in.readZLong(); - this.lastRequestedSeqNo = in.readZLong(); - this.numberOfConcurrentReads = in.readVInt(); - this.numberOfConcurrentWrites = in.readVInt(); - this.numberOfQueuedWrites = in.readVInt(); - this.mappingVersion = in.readVLong(); - this.totalFetchTimeMillis = in.readVLong(); - this.numberOfSuccessfulFetches = in.readVLong(); - this.numberOfFailedFetches = in.readVLong(); - this.operationsReceived = in.readVLong(); - this.totalTransferredBytes = in.readVLong(); - this.totalIndexTimeMillis = in.readVLong(); - this.numberOfSuccessfulBulkOperations = in.readVLong(); - this.numberOfFailedBulkOperations = in.readVLong(); - this.numberOfOperationsIndexed = in.readVLong(); - this.fetchExceptions = new TreeMap<>(in.readMap(StreamInput::readVLong, StreamInput::readException)); - this.timeSinceLastFetchMillis = in.readZLong(); - } - - @Override - public String getWriteableName() { - return STATUS_PARSER_NAME; - } - - @Override - public void 
writeTo(final StreamOutput out) throws IOException { - out.writeString(leaderIndex); - out.writeVInt(shardId); - out.writeZLong(leaderGlobalCheckpoint); - out.writeZLong(leaderMaxSeqNo); - out.writeZLong(followerGlobalCheckpoint); - out.writeZLong(followerMaxSeqNo); - out.writeZLong(lastRequestedSeqNo); - out.writeVInt(numberOfConcurrentReads); - out.writeVInt(numberOfConcurrentWrites); - out.writeVInt(numberOfQueuedWrites); - out.writeVLong(mappingVersion); - out.writeVLong(totalFetchTimeMillis); - out.writeVLong(numberOfSuccessfulFetches); - out.writeVLong(numberOfFailedFetches); - out.writeVLong(operationsReceived); - out.writeVLong(totalTransferredBytes); - out.writeVLong(totalIndexTimeMillis); - out.writeVLong(numberOfSuccessfulBulkOperations); - out.writeVLong(numberOfFailedBulkOperations); - out.writeVLong(numberOfOperationsIndexed); - out.writeMap(fetchExceptions, StreamOutput::writeVLong, StreamOutput::writeException); - out.writeZLong(timeSinceLastFetchMillis); - } - - @Override - public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { - builder.startObject(); - { - builder.field(LEADER_INDEX.getPreferredName(), leaderIndex); - builder.field(SHARD_ID.getPreferredName(), shardId); - builder.field(LEADER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), leaderGlobalCheckpoint); - builder.field(LEADER_MAX_SEQ_NO_FIELD.getPreferredName(), leaderMaxSeqNo); - builder.field(FOLLOWER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), followerGlobalCheckpoint); - builder.field(FOLLOWER_MAX_SEQ_NO_FIELD.getPreferredName(), followerMaxSeqNo); - builder.field(LAST_REQUESTED_SEQ_NO_FIELD.getPreferredName(), lastRequestedSeqNo); - builder.field(NUMBER_OF_CONCURRENT_READS_FIELD.getPreferredName(), numberOfConcurrentReads); - builder.field(NUMBER_OF_CONCURRENT_WRITES_FIELD.getPreferredName(), numberOfConcurrentWrites); - builder.field(NUMBER_OF_QUEUED_WRITES_FIELD.getPreferredName(), numberOfQueuedWrites); - builder.field(MAPPING_VERSION_FIELD.getPreferredName(), mappingVersion); - builder.humanReadableField( - TOTAL_FETCH_TIME_MILLIS_FIELD.getPreferredName(), - "total_fetch_time", - new TimeValue(totalFetchTimeMillis, TimeUnit.MILLISECONDS)); - builder.field(NUMBER_OF_SUCCESSFUL_FETCHES_FIELD.getPreferredName(), numberOfSuccessfulFetches); - builder.field(NUMBER_OF_FAILED_FETCHES_FIELD.getPreferredName(), numberOfFailedFetches); - builder.field(OPERATIONS_RECEIVED_FIELD.getPreferredName(), operationsReceived); - builder.humanReadableField( - TOTAL_TRANSFERRED_BYTES.getPreferredName(), - "total_transferred", - new ByteSizeValue(totalTransferredBytes, ByteSizeUnit.BYTES)); - builder.humanReadableField( - TOTAL_INDEX_TIME_MILLIS_FIELD.getPreferredName(), - "total_index_time", - new TimeValue(totalIndexTimeMillis, TimeUnit.MILLISECONDS)); - builder.field(NUMBER_OF_SUCCESSFUL_BULK_OPERATIONS_FIELD.getPreferredName(), numberOfSuccessfulBulkOperations); - builder.field(NUMBER_OF_FAILED_BULK_OPERATIONS_FIELD.getPreferredName(), numberOfFailedBulkOperations); - builder.field(NUMBER_OF_OPERATIONS_INDEXED_FIELD.getPreferredName(), numberOfOperationsIndexed); - builder.startArray(FETCH_EXCEPTIONS.getPreferredName()); - { - for (final Map.Entry entry : fetchExceptions.entrySet()) { - builder.startObject(); - { - builder.field(FETCH_EXCEPTIONS_ENTRY_FROM_SEQ_NO.getPreferredName(), entry.getKey()); - builder.field(FETCH_EXCEPTIONS_ENTRY_EXCEPTION.getPreferredName()); - builder.startObject(); - { - ElasticsearchException.generateThrowableXContent(builder, params, 
entry.getValue()); - } - builder.endObject(); - } - builder.endObject(); - } - } - builder.endArray(); - builder.humanReadableField( - TIME_SINCE_LAST_FETCH_MILLIS_FIELD.getPreferredName(), - "time_since_last_fetch", - new TimeValue(timeSinceLastFetchMillis, TimeUnit.MILLISECONDS)); - } - builder.endObject(); - return builder; - } - - public static Status fromXContent(final XContentParser parser) { - return STATUS_PARSER.apply(parser, null); - } - - @Override - public boolean equals(final Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - final Status that = (Status) o; - return leaderIndex.equals(that.leaderIndex) && - shardId == that.shardId && - leaderGlobalCheckpoint == that.leaderGlobalCheckpoint && - leaderMaxSeqNo == that.leaderMaxSeqNo && - followerGlobalCheckpoint == that.followerGlobalCheckpoint && - followerMaxSeqNo == that.followerMaxSeqNo && - lastRequestedSeqNo == that.lastRequestedSeqNo && - numberOfConcurrentReads == that.numberOfConcurrentReads && - numberOfConcurrentWrites == that.numberOfConcurrentWrites && - numberOfQueuedWrites == that.numberOfQueuedWrites && - mappingVersion == that.mappingVersion && - totalFetchTimeMillis == that.totalFetchTimeMillis && - numberOfSuccessfulFetches == that.numberOfSuccessfulFetches && - numberOfFailedFetches == that.numberOfFailedFetches && - operationsReceived == that.operationsReceived && - totalTransferredBytes == that.totalTransferredBytes && - numberOfSuccessfulBulkOperations == that.numberOfSuccessfulBulkOperations && - numberOfFailedBulkOperations == that.numberOfFailedBulkOperations && - numberOfOperationsIndexed == that.numberOfOperationsIndexed && - /* - * ElasticsearchException does not implement equals so we will assume the fetch exceptions are equal if they are equal - * up to the key set and their messages. Note that we are relying on the fact that the fetch exceptions are ordered by - * keys. - */ - fetchExceptions.keySet().equals(that.fetchExceptions.keySet()) && - getFetchExceptionMessages(this).equals(getFetchExceptionMessages(that)) && - timeSinceLastFetchMillis == that.timeSinceLastFetchMillis; - } - - @Override - public int hashCode() { - return Objects.hash( - leaderIndex, - shardId, - leaderGlobalCheckpoint, - leaderMaxSeqNo, - followerGlobalCheckpoint, - followerMaxSeqNo, - lastRequestedSeqNo, - numberOfConcurrentReads, - numberOfConcurrentWrites, - numberOfQueuedWrites, - mappingVersion, - totalFetchTimeMillis, - numberOfSuccessfulFetches, - numberOfFailedFetches, - operationsReceived, - totalTransferredBytes, - numberOfSuccessfulBulkOperations, - numberOfFailedBulkOperations, - numberOfOperationsIndexed, - /* - * ElasticsearchException does not implement hash code so we will compute the hash code based on the key set and the - * messages. Note that we are relying on the fact that the fetch exceptions are ordered by keys. 
- */ - fetchExceptions.keySet(), - getFetchExceptionMessages(this), - timeSinceLastFetchMillis); - } - - private static List getFetchExceptionMessages(final Status status) { - return status.fetchExceptions().values().stream().map(ElasticsearchException::getMessage).collect(Collectors.toList()); - } - - public String toString() { - return Strings.toString(this); - } - - } - } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java index 82482792f3907..9da19cb1998d7 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java @@ -48,7 +48,7 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { public static final ParseField MAX_BATCH_SIZE_IN_BYTES = new ParseField("max_batch_size_in_bytes"); public static final ParseField MAX_CONCURRENT_WRITE_BATCHES = new ParseField("max_concurrent_write_batches"); public static final ParseField MAX_WRITE_BUFFER_SIZE = new ParseField("max_write_buffer_size"); - public static final ParseField RETRY_TIMEOUT = new ParseField("retry_timeout"); + public static final ParseField MAX_RETRY_DELAY = new ParseField("max_retry_delay"); public static final ParseField IDLE_SHARD_RETRY_DELAY = new ParseField("idle_shard_retry_delay"); @SuppressWarnings("unchecked") @@ -71,8 +71,8 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_CONCURRENT_WRITE_BATCHES); PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_WRITE_BUFFER_SIZE); PARSER.declareField(ConstructingObjectParser.constructorArg(), - (p, c) -> TimeValue.parseTimeValue(p.text(), RETRY_TIMEOUT.getPreferredName()), - RETRY_TIMEOUT, ObjectParser.ValueType.STRING); + (p, c) -> TimeValue.parseTimeValue(p.text(), MAX_RETRY_DELAY.getPreferredName()), + MAX_RETRY_DELAY, ObjectParser.ValueType.STRING); PARSER.declareField(ConstructingObjectParser.constructorArg(), (p, c) -> TimeValue.parseTimeValue(p.text(), IDLE_SHARD_RETRY_DELAY.getPreferredName()), IDLE_SHARD_RETRY_DELAY, ObjectParser.ValueType.STRING); @@ -87,13 +87,13 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { private final long maxBatchSizeInBytes; private final int maxConcurrentWriteBatches; private final int maxWriteBufferSize; - private final TimeValue retryTimeout; + private final TimeValue maxRetryDelay; private final TimeValue idleShardRetryDelay; private final Map headers; ShardFollowTask(String leaderClusterAlias, ShardId followShardId, ShardId leaderShardId, int maxBatchOperationCount, int maxConcurrentReadBatches, long maxBatchSizeInBytes, int maxConcurrentWriteBatches, - int maxWriteBufferSize, TimeValue retryTimeout, TimeValue idleShardRetryDelay, Map headers) { + int maxWriteBufferSize, TimeValue maxRetryDelay, TimeValue idleShardRetryDelay, Map headers) { this.leaderClusterAlias = leaderClusterAlias; this.followShardId = followShardId; this.leaderShardId = leaderShardId; @@ -102,7 +102,7 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { this.maxBatchSizeInBytes = maxBatchSizeInBytes; this.maxConcurrentWriteBatches = maxConcurrentWriteBatches; this.maxWriteBufferSize = maxWriteBufferSize; - this.retryTimeout = retryTimeout; + this.maxRetryDelay = maxRetryDelay; 
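
The equals/hashCode comments in the removed Status class above note that ElasticsearchException implements neither equals nor hashCode, so fetch exceptions are compared by key set plus ordered messages instead. A small self-contained sketch of that convention, with illustrative names:

import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;
import java.util.stream.Collectors;

public class FetchExceptionsEquality {

    // Two maps are considered equal when their key sets and ordered message lists match,
    // mirroring Status#equals/hashCode above.
    static boolean equalFetchExceptions(NavigableMap<Long, Exception> a, NavigableMap<Long, Exception> b) {
        return a.keySet().equals(b.keySet()) && messages(a).equals(messages(b));
    }

    static List<String> messages(NavigableMap<Long, Exception> m) {
        // NavigableMap iterates in key order, which makes the message lists comparable.
        return m.values().stream().map(Exception::getMessage).collect(Collectors.toList());
    }

    public static void main(String[] args) {
        NavigableMap<Long, Exception> a = new TreeMap<>();
        NavigableMap<Long, Exception> b = new TreeMap<>();
        a.put(7L, new RuntimeException("shard not available"));
        b.put(7L, new RuntimeException("shard not available"));
        System.out.println(a.equals(b));                // false: RuntimeException has no equals
        System.out.println(equalFetchExceptions(a, b)); // true: same keys and messages
    }
}
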
this.idleShardRetryDelay = idleShardRetryDelay; this.headers = headers != null ? Collections.unmodifiableMap(headers) : Collections.emptyMap(); } @@ -116,7 +116,7 @@ public ShardFollowTask(StreamInput in) throws IOException { this.maxBatchSizeInBytes = in.readVLong(); this.maxConcurrentWriteBatches = in.readVInt(); this.maxWriteBufferSize = in.readVInt(); - this.retryTimeout = in.readTimeValue(); + this.maxRetryDelay = in.readTimeValue(); this.idleShardRetryDelay = in.readTimeValue(); this.headers = Collections.unmodifiableMap(in.readMap(StreamInput::readString, StreamInput::readString)); } @@ -153,8 +153,8 @@ public long getMaxBatchSizeInBytes() { return maxBatchSizeInBytes; } - public TimeValue getRetryTimeout() { - return retryTimeout; + public TimeValue getMaxRetryDelay() { + return maxRetryDelay; } public TimeValue getIdleShardRetryDelay() { @@ -184,7 +184,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(maxBatchSizeInBytes); out.writeVInt(maxConcurrentWriteBatches); out.writeVInt(maxWriteBufferSize); - out.writeTimeValue(retryTimeout); + out.writeTimeValue(maxRetryDelay); out.writeTimeValue(idleShardRetryDelay); out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); } @@ -210,7 +210,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(MAX_BATCH_SIZE_IN_BYTES.getPreferredName(), maxBatchSizeInBytes); builder.field(MAX_CONCURRENT_WRITE_BATCHES.getPreferredName(), maxConcurrentWriteBatches); builder.field(MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize); - builder.field(RETRY_TIMEOUT.getPreferredName(), retryTimeout.getStringRep()); + builder.field(MAX_RETRY_DELAY.getPreferredName(), maxRetryDelay.getStringRep()); builder.field(IDLE_SHARD_RETRY_DELAY.getPreferredName(), idleShardRetryDelay.getStringRep()); builder.field(HEADERS.getPreferredName(), headers); return builder.endObject(); @@ -229,7 +229,7 @@ public boolean equals(Object o) { maxConcurrentWriteBatches == that.maxConcurrentWriteBatches && maxBatchSizeInBytes == that.maxBatchSizeInBytes && maxWriteBufferSize == that.maxWriteBufferSize && - Objects.equals(retryTimeout, that.retryTimeout) && + Objects.equals(maxRetryDelay, that.maxRetryDelay) && Objects.equals(idleShardRetryDelay, that.idleShardRetryDelay) && Objects.equals(headers, that.headers); } @@ -237,7 +237,7 @@ public boolean equals(Object o) { @Override public int hashCode() { return Objects.hash(leaderClusterAlias, followShardId, leaderShardId, maxBatchOperationCount, maxConcurrentReadBatches, - maxConcurrentWriteBatches, maxBatchSizeInBytes, maxWriteBufferSize, retryTimeout, idleShardRetryDelay, headers); + maxConcurrentWriteBatches, maxBatchSizeInBytes, maxWriteBufferSize, maxRetryDelay, idleShardRetryDelay, headers); } public String toString() { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCcrStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCcrStatsAction.java index 3b5d0ac53cf81..f227a56f1582f 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCcrStatsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCcrStatsAction.java @@ -22,6 +22,7 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.ccr.Ccr; import org.elasticsearch.xpack.ccr.CcrLicenseChecker; +import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; import java.io.IOException; import 
java.util.Arrays; @@ -33,8 +34,8 @@ public class TransportCcrStatsAction extends TransportTasksAction< ShardFollowNodeTask, - CcrStatsAction.TasksRequest, - CcrStatsAction.TasksResponse, CcrStatsAction.TaskResponse> { + CcrStatsAction.StatsRequest, + CcrStatsAction.StatsResponses, CcrStatsAction.StatsResponse> { private final IndexNameExpressionResolver resolver; private final CcrLicenseChecker ccrLicenseChecker; @@ -53,8 +54,8 @@ public TransportCcrStatsAction( clusterService, transportService, actionFilters, - CcrStatsAction.TasksRequest::new, - CcrStatsAction.TasksResponse::new, + CcrStatsAction.StatsRequest::new, + CcrStatsAction.StatsResponses::new, Ccr.CCR_THREAD_POOL_NAME); this.resolver = Objects.requireNonNull(resolver); this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker); @@ -63,8 +64,8 @@ public TransportCcrStatsAction( @Override protected void doExecute( final Task task, - final CcrStatsAction.TasksRequest request, - final ActionListener listener) { + final CcrStatsAction.StatsRequest request, + final ActionListener listener) { if (ccrLicenseChecker.isCcrAllowed() == false) { listener.onFailure(LicenseUtils.newComplianceException("ccr")); return; @@ -73,21 +74,21 @@ protected void doExecute( } @Override - protected CcrStatsAction.TasksResponse newResponse( - final CcrStatsAction.TasksRequest request, - final List taskResponses, + protected CcrStatsAction.StatsResponses newResponse( + final CcrStatsAction.StatsRequest request, + final List statsResponses, final List taskOperationFailures, final List failedNodeExceptions) { - return new CcrStatsAction.TasksResponse(taskOperationFailures, failedNodeExceptions, taskResponses); + return new CcrStatsAction.StatsResponses(taskOperationFailures, failedNodeExceptions, statsResponses); } @Override - protected CcrStatsAction.TaskResponse readTaskResponse(final StreamInput in) throws IOException { - return new CcrStatsAction.TaskResponse(in); + protected CcrStatsAction.StatsResponse readTaskResponse(final StreamInput in) throws IOException { + return new CcrStatsAction.StatsResponse(in); } @Override - protected void processTasks(final CcrStatsAction.TasksRequest request, final Consumer operation) { + protected void processTasks(final CcrStatsAction.StatsRequest request, final Consumer operation) { final ClusterState state = clusterService.state(); final Set concreteIndices = new HashSet<>(Arrays.asList(resolver.concreteIndexNames(state, request))); for (final Task task : taskManager.getTasks().values()) { @@ -102,10 +103,10 @@ protected void processTasks(final CcrStatsAction.TasksRequest request, final Con @Override protected void taskOperation( - final CcrStatsAction.TasksRequest request, + final CcrStatsAction.StatsRequest request, final ShardFollowNodeTask task, - final ActionListener listener) { - listener.onResponse(new CcrStatsAction.TaskResponse(task.getFollowShardId(), task.getStatus())); + final ActionListener listener) { + listener.onResponse(new CcrStatsAction.StatsResponse(task.getFollowShardId(), task.getStatus())); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCreateAndFollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCreateAndFollowIndexAction.java new file mode 100644 index 0000000000000..b99b569a525ca --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCreateAndFollowIndexAction.java @@ -0,0 +1,231 @@ +/* + * Copyright Elasticsearch B.V.
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ccr.action; + +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.ActiveShardsObserver; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.transport.RemoteClusterService; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.ccr.CcrLicenseChecker; +import org.elasticsearch.xpack.ccr.CcrSettings; +import org.elasticsearch.xpack.core.ccr.action.CreateAndFollowIndexAction; +import org.elasticsearch.xpack.core.ccr.action.FollowIndexAction; + +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public final class TransportCreateAndFollowIndexAction + extends TransportMasterNodeAction { + + private final Client client; + private final AllocationService allocationService; + private final RemoteClusterService remoteClusterService; + private final ActiveShardsObserver activeShardsObserver; + private final CcrLicenseChecker ccrLicenseChecker; + + @Inject + public TransportCreateAndFollowIndexAction( + final Settings settings, + final ThreadPool threadPool, + final TransportService transportService, + final ClusterService clusterService, + final ActionFilters actionFilters, + final IndexNameExpressionResolver indexNameExpressionResolver, + final Client client, + final AllocationService allocationService, + final CcrLicenseChecker ccrLicenseChecker) { + super( + settings, + CreateAndFollowIndexAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + indexNameExpressionResolver, + CreateAndFollowIndexAction.Request::new); + this.client = client; + this.allocationService = allocationService; + this.remoteClusterService = transportService.getRemoteClusterService(); + this.activeShardsObserver = new ActiveShardsObserver(settings, clusterService, threadPool); + this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected CreateAndFollowIndexAction.Response newResponse() { + 
return new CreateAndFollowIndexAction.Response(); + } + + @Override + protected void masterOperation( + final CreateAndFollowIndexAction.Request request, + final ClusterState state, + final ActionListener listener) throws Exception { + if (ccrLicenseChecker.isCcrAllowed() == false) { + listener.onFailure(LicenseUtils.newComplianceException("ccr")); + return; + } + final String[] indices = new String[]{request.getFollowRequest().getLeaderIndex()}; + final Map> remoteClusterIndices = remoteClusterService.groupClusterIndices(indices, s -> false); + if (remoteClusterIndices.containsKey(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { + createFollowerIndexAndFollowLocalIndex(request, state, listener); + } else { + assert remoteClusterIndices.size() == 1; + final Map.Entry> entry = remoteClusterIndices.entrySet().iterator().next(); + assert entry.getValue().size() == 1; + final String clusterAlias = entry.getKey(); + final String leaderIndex = entry.getValue().get(0); + createFollowerIndexAndFollowRemoteIndex(request, clusterAlias, leaderIndex, listener); + } + } + + private void createFollowerIndexAndFollowLocalIndex( + final CreateAndFollowIndexAction.Request request, + final ClusterState state, + final ActionListener listener) { + // following an index in local cluster, so use local cluster state to fetch leader index metadata + final IndexMetaData leaderIndexMetadata = state.getMetaData().index(request.getFollowRequest().getLeaderIndex()); + createFollowerIndex(leaderIndexMetadata, request, listener); + } + + private void createFollowerIndexAndFollowRemoteIndex( + final CreateAndFollowIndexAction.Request request, + final String clusterAlias, + final String leaderIndex, + final ActionListener listener) { + ccrLicenseChecker.checkRemoteClusterLicenseAndFetchLeaderIndexMetadata( + client, + clusterAlias, + leaderIndex, + listener::onFailure, + leaderIndexMetaData -> createFollowerIndex(leaderIndexMetaData, request, listener)); + } + + private void createFollowerIndex( + final IndexMetaData leaderIndexMetaData, + final CreateAndFollowIndexAction.Request request, + final ActionListener listener) { + if (leaderIndexMetaData == null) { + listener.onFailure(new IllegalArgumentException("leader index [" + request.getFollowRequest().getLeaderIndex() + + "] does not exist")); + return; + } + + ActionListener handler = ActionListener.wrap( + result -> { + if (result) { + initiateFollowing(request, listener); + } else { + listener.onResponse(new CreateAndFollowIndexAction.Response(true, false, false)); + } + }, + listener::onFailure); + // Can't use create index api here, because then index templates can alter the mappings / settings. + // And index templates could introduce settings / mappings that are incompatible with the leader index. + clusterService.submitStateUpdateTask("follow_index_action", new AckedClusterStateUpdateTask(request, handler) { + + @Override + protected Boolean newResponse(final boolean acknowledged) { + return acknowledged; + } + + @Override + public ClusterState execute(final ClusterState currentState) throws Exception { + String followIndex = request.getFollowRequest().getFollowerIndex(); + IndexMetaData currentIndex = currentState.metaData().index(followIndex); + if (currentIndex != null) { + throw new ResourceAlreadyExistsException(currentIndex.getIndex()); + } + + MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); + IndexMetaData.Builder imdBuilder = IndexMetaData.builder(followIndex); + + // Copy all settings, but overwrite a few settings. 
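The cluster-state update that follows copies the leader's settings and then overwrites the few that must differ on the follower. A hedged standalone sketch of that copy-then-overwrite idea (the helper class and method names are invented for illustration; the CCR setting key is assumed to be the one CcrSettings.CCR_FOLLOWING_INDEX_SETTING defines):

    import org.elasticsearch.cluster.metadata.IndexMetaData;
    import org.elasticsearch.common.UUIDs;
    import org.elasticsearch.common.settings.Settings;

    // Illustrative helper, not part of the change: derive follower index settings
    // by copying every leader setting, then forcing the values that must be
    // unique to the follower index.
    public class FollowerSettingsSketch {
        static Settings deriveFollowerSettings(Settings leaderSettings, String followIndex) {
            return Settings.builder()
                .put(leaderSettings)
                // fresh UUID, otherwise an index in the same cluster could not be followed
                .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())
                .put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, followIndex)
                // assumed key of CcrSettings.CCR_FOLLOWING_INDEX_SETTING
                .put("index.xpack.ccr.following_index", true)
                .build();
        }
    }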
+ Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put(leaderIndexMetaData.getSettings()); + // Overwriting UUID here, because otherwise we can't follow indices in the same cluster + settingsBuilder.put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()); + settingsBuilder.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, followIndex); + settingsBuilder.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); + imdBuilder.settings(settingsBuilder); + + // Copy mappings from leader IMD to follow IMD + for (ObjectObjectCursor cursor : leaderIndexMetaData.getMappings()) { + imdBuilder.putMapping(cursor.value); + } + imdBuilder.setRoutingNumShards(leaderIndexMetaData.getRoutingNumShards()); + IndexMetaData followIMD = imdBuilder.build(); + mdBuilder.put(followIMD, false); + + ClusterState.Builder builder = ClusterState.builder(currentState); + builder.metaData(mdBuilder.build()); + ClusterState updatedState = builder.build(); + + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(updatedState.routingTable()) + .addAsNew(updatedState.metaData().index(request.getFollowRequest().getFollowerIndex())); + updatedState = allocationService.reroute( + ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build(), + "follow index [" + request.getFollowRequest().getFollowerIndex() + "] created"); + + logger.info("[{}] creating index, cause [ccr_create_and_follow], shards [{}]/[{}]", + followIndex, followIMD.getNumberOfShards(), followIMD.getNumberOfReplicas()); + + return updatedState; + } + }); + } + + private void initiateFollowing( + final CreateAndFollowIndexAction.Request request, + final ActionListener listener) { + activeShardsObserver.waitForActiveShards(new String[]{request.getFollowRequest().getFollowerIndex()}, + ActiveShardCount.DEFAULT, request.timeout(), result -> { + if (result) { + client.execute(FollowIndexAction.INSTANCE, request.getFollowRequest(), ActionListener.wrap( + r -> listener.onResponse(new CreateAndFollowIndexAction.Response(true, true, r.isAcknowledged())), + listener::onFailure + )); + } else { + listener.onResponse(new CreateAndFollowIndexAction.Response(true, false, false)); + } + }, listener::onFailure); + } + + @Override + protected ClusterBlockException checkBlock(final CreateAndFollowIndexAction.Request request, final ClusterState state) { + return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, request.getFollowRequest().getFollowerIndex()); + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowIndexAction.java new file mode 100644 index 0000000000000..d1d826997c673 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowIndexAction.java @@ -0,0 +1,338 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexingSlowLog; +import org.elasticsearch.index.SearchSlowLog; +import org.elasticsearch.index.cache.bitset.BitsetFilterCache; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesRequestCache; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.persistent.PersistentTasksService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.transport.RemoteClusterService; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.ccr.CcrLicenseChecker; +import org.elasticsearch.xpack.ccr.CcrSettings; +import org.elasticsearch.xpack.core.ccr.action.FollowIndexAction; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReferenceArray; +import java.util.stream.Collectors; + +public class TransportFollowIndexAction extends HandledTransportAction { + + private final Client client; + private final ThreadPool threadPool; + private final ClusterService clusterService; + private final RemoteClusterService remoteClusterService; + private final PersistentTasksService persistentTasksService; + private final IndicesService indicesService; + private final CcrLicenseChecker ccrLicenseChecker; + + @Inject + public TransportFollowIndexAction( + final Settings settings, + final ThreadPool threadPool, + final TransportService transportService, + final ActionFilters actionFilters, + final Client client, + final ClusterService clusterService, + final PersistentTasksService persistentTasksService, + final IndicesService indicesService, + final CcrLicenseChecker ccrLicenseChecker) { + super(settings, FollowIndexAction.NAME, transportService, actionFilters, FollowIndexAction.Request::new); + this.client = client; + this.threadPool = threadPool; + this.clusterService = clusterService; + this.remoteClusterService = transportService.getRemoteClusterService(); + this.persistentTasksService = persistentTasksService; + this.indicesService = indicesService; + this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker); + } + + @Override + protected void doExecute(final Task task, + final FollowIndexAction.Request 
request, + final ActionListener listener) { + if (ccrLicenseChecker.isCcrAllowed() == false) { + listener.onFailure(LicenseUtils.newComplianceException("ccr")); + return; + } + final String[] indices = new String[]{request.getLeaderIndex()}; + final Map> remoteClusterIndices = remoteClusterService.groupClusterIndices(indices, s -> false); + if (remoteClusterIndices.containsKey(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { + followLocalIndex(request, listener); + } else { + assert remoteClusterIndices.size() == 1; + final Map.Entry> entry = remoteClusterIndices.entrySet().iterator().next(); + assert entry.getValue().size() == 1; + final String clusterAlias = entry.getKey(); + final String leaderIndex = entry.getValue().get(0); + followRemoteIndex(request, clusterAlias, leaderIndex, listener); + } + } + + private void followLocalIndex(final FollowIndexAction.Request request, + final ActionListener listener) { + final ClusterState state = clusterService.state(); + final IndexMetaData followerIndexMetadata = state.getMetaData().index(request.getFollowerIndex()); + // following an index in local cluster, so use local cluster state to fetch leader index metadata + final IndexMetaData leaderIndexMetadata = state.getMetaData().index(request.getLeaderIndex()); + try { + start(request, null, leaderIndexMetadata, followerIndexMetadata, listener); + } catch (final IOException e) { + listener.onFailure(e); + } + } + + private void followRemoteIndex( + final FollowIndexAction.Request request, + final String clusterAlias, + final String leaderIndex, + final ActionListener listener) { + final ClusterState state = clusterService.state(); + final IndexMetaData followerIndexMetadata = state.getMetaData().index(request.getFollowerIndex()); + ccrLicenseChecker.checkRemoteClusterLicenseAndFetchLeaderIndexMetadata( + client, + clusterAlias, + leaderIndex, + listener::onFailure, + leaderIndexMetadata -> { + try { + start(request, clusterAlias, leaderIndexMetadata, followerIndexMetadata, listener); + } catch (final IOException e) { + listener.onFailure(e); + } + }); + } + + /** + * Performs validation on the provided leader and follow {@link IndexMetaData} instances and then + * creates a persistent task for each leader primary shard. These persistent tasks track changes in the leader + * shard and replicate these changes to a follower shard. + * + * Currently the following validation is performed: + * <ul>
+ *     <li>The leader index and follow index need to have the same number of primary shards</li> + * </ul>
+ */ + void start( + FollowIndexAction.Request request, + String clusterNameAlias, + IndexMetaData leaderIndexMetadata, + IndexMetaData followIndexMetadata, + ActionListener handler) throws IOException { + + MapperService mapperService = followIndexMetadata != null ? indicesService.createIndexMapperService(followIndexMetadata) : null; + validate(request, leaderIndexMetadata, followIndexMetadata, mapperService); + final int numShards = followIndexMetadata.getNumberOfShards(); + final AtomicInteger counter = new AtomicInteger(numShards); + final AtomicReferenceArray responses = new AtomicReferenceArray<>(followIndexMetadata.getNumberOfShards()); + Map filteredHeaders = threadPool.getThreadContext().getHeaders().entrySet().stream() + .filter(e -> ShardFollowTask.HEADER_FILTERS.contains(e.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + + for (int i = 0; i < numShards; i++) { + final int shardId = i; + String taskId = followIndexMetadata.getIndexUUID() + "-" + shardId; + + ShardFollowTask shardFollowTask = new ShardFollowTask( + clusterNameAlias, + new ShardId(followIndexMetadata.getIndex(), shardId), + new ShardId(leaderIndexMetadata.getIndex(), shardId), + request.getMaxBatchOperationCount(), + request.getMaxConcurrentReadBatches(), + request.getMaxOperationSizeInBytes(), + request.getMaxConcurrentWriteBatches(), + request.getMaxWriteBufferSize(), + request.getMaxRetryDelay(), + request.getIdleShardRetryDelay(), + filteredHeaders); + persistentTasksService.sendStartRequest(taskId, ShardFollowTask.NAME, shardFollowTask, + new ActionListener>() { + @Override + public void onResponse(PersistentTasksCustomMetaData.PersistentTask task) { + responses.set(shardId, task); + finalizeResponse(); + } + + @Override + public void onFailure(Exception e) { + responses.set(shardId, e); + finalizeResponse(); + } + + void finalizeResponse() { + Exception error = null; + if (counter.decrementAndGet() == 0) { + for (int j = 0; j < responses.length(); j++) { + Object response = responses.get(j); + if (response instanceof Exception) { + if (error == null) { + error = (Exception) response; + } else { + error.addSuppressed((Throwable) response); + } + } + } + + if (error == null) { + // include task ids? 
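The surrounding start() method fans out one persistent-task start request per shard and gathers the results with a countdown plus a slot-per-shard array, folding secondary failures in as suppressed exceptions. A self-contained sketch of that fan-in pattern (all names and the simulated failure are illustrative, not part of the change):

    import java.util.concurrent.atomic.AtomicInteger;
    import java.util.concurrent.atomic.AtomicReferenceArray;

    // Illustrative fan-in sketch: one slot per shard, count down as callbacks
    // arrive, then fold any failures into a single exception.
    public class PerShardFanInSketch {
        public static void main(String[] args) {
            int numShards = 3;
            AtomicInteger counter = new AtomicInteger(numShards);
            AtomicReferenceArray<Object> responses = new AtomicReferenceArray<>(numShards);
            for (int shardId = 0; shardId < numShards; shardId++) {
                // stand-in for the async persistent-task callback; here shard 1 "fails"
                Object result = shardId == 1 ? new RuntimeException("shard " + shardId + " failed") : "started-" + shardId;
                responses.set(shardId, result);
                if (counter.decrementAndGet() == 0) {
                    // all callbacks are in: collect failures into one exception
                    Exception error = null;
                    for (int j = 0; j < responses.length(); j++) {
                        Object response = responses.get(j);
                        if (response instanceof Exception) {
                            if (error == null) {
                                error = (Exception) response;
                            } else {
                                error.addSuppressed((Exception) response);
                            }
                        }
                    }
                    System.out.println(error == null ? "all shards started" : "failed: " + error.getMessage());
                }
            }
        }
    }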
+ handler.onResponse(new AcknowledgedResponse(true)); + } else { + // TODO: cancel all started tasks + handler.onFailure(error); + } + } + } + } + ); + } + } + + static void validate( + final FollowIndexAction.Request request, + final IndexMetaData leaderIndex, + final IndexMetaData followIndex, + final MapperService followerMapperService) { + if (leaderIndex == null) { + throw new IllegalArgumentException("leader index [" + request.getLeaderIndex() + "] does not exist"); + } + if (followIndex == null) { + throw new IllegalArgumentException("follow index [" + request.getFollowerIndex() + "] does not exist"); + } + if (leaderIndex.getSettings().getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) == false) { + throw new IllegalArgumentException("leader index [" + request.getLeaderIndex() + "] does not have soft deletes enabled"); + } + if (leaderIndex.getNumberOfShards() != followIndex.getNumberOfShards()) { + throw new IllegalArgumentException("leader index primary shards [" + leaderIndex.getNumberOfShards() + + "] does not match with the number of shards of the follow index [" + followIndex.getNumberOfShards() + "]"); + } + if (leaderIndex.getRoutingNumShards() != followIndex.getRoutingNumShards()) { + throw new IllegalArgumentException("leader index number_of_routing_shards [" + leaderIndex.getRoutingNumShards() + + "] does not match with the number_of_routing_shards of the follow index [" + followIndex.getRoutingNumShards() + "]"); + } + if (leaderIndex.getState() != IndexMetaData.State.OPEN || followIndex.getState() != IndexMetaData.State.OPEN) { + throw new IllegalArgumentException("leader and follow index must be open"); + } + if (CcrSettings.CCR_FOLLOWING_INDEX_SETTING.get(followIndex.getSettings()) == false) { + throw new IllegalArgumentException("the following index [" + request.getFollowerIndex() + "] is not ready " + + "to follow; the setting [" + CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey() + "] must be enabled."); + } + // Make a copy, remove settings that are allowed to be different and then compare if the settings are equal. + Settings leaderSettings = filter(leaderIndex.getSettings()); + Settings followerSettings = filter(followIndex.getSettings()); + if (leaderSettings.equals(followerSettings) == false) { + throw new IllegalArgumentException("the leader and follower index settings must be identical"); + } + + // Validates whether the current follower mapping is mergeable with the leader mapping.
+ // This also validates for example whether specific mapper plugins have been installed + followerMapperService.merge(leaderIndex, MapperService.MergeReason.MAPPING_RECOVERY); + } + + private static final Set> WHITE_LISTED_SETTINGS; + + static { + final Set> whiteListedSettings = new HashSet<>(); + whiteListedSettings.add(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING); + whiteListedSettings.add(IndexMetaData.INDEX_AUTO_EXPAND_REPLICAS_SETTING); + + whiteListedSettings.add(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING); + whiteListedSettings.add(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING); + whiteListedSettings.add(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING); + whiteListedSettings.add(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING); + whiteListedSettings.add(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING); + whiteListedSettings.add(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING); + + whiteListedSettings.add(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING); + whiteListedSettings.add(IndexSettings.MAX_RESULT_WINDOW_SETTING); + whiteListedSettings.add(IndexSettings.INDEX_WARMER_ENABLED_SETTING); + whiteListedSettings.add(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING); + whiteListedSettings.add(IndexSettings.MAX_RESCORE_WINDOW_SETTING); + whiteListedSettings.add(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING); + whiteListedSettings.add(IndexSettings.DEFAULT_FIELD_SETTING); + whiteListedSettings.add(IndexSettings.QUERY_STRING_LENIENT_SETTING); + whiteListedSettings.add(IndexSettings.QUERY_STRING_ANALYZE_WILDCARD); + whiteListedSettings.add(IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD); + whiteListedSettings.add(IndexSettings.ALLOW_UNMAPPED); + whiteListedSettings.add(IndexSettings.INDEX_SEARCH_IDLE_AFTER); + whiteListedSettings.add(BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING); + + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL); + whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING); + whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING); + whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING); + whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING); + whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING); + whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING); + whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG_SETTING); + + whiteListedSettings.add(IndexSettings.INDEX_SOFT_DELETES_SETTING); + whiteListedSettings.add(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING); + + WHITE_LISTED_SETTINGS 
= Collections.unmodifiableSet(whiteListedSettings); + } + + private static Settings filter(Settings originalSettings) { + Settings.Builder settings = Settings.builder().put(originalSettings); + // Remove settings that are always going to be different between leader and follow index: + settings.remove(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey()); + settings.remove(IndexMetaData.SETTING_INDEX_UUID); + settings.remove(IndexMetaData.SETTING_INDEX_PROVIDED_NAME); + settings.remove(IndexMetaData.SETTING_CREATION_DATE); + + Iterator iterator = settings.keys().iterator(); + while (iterator.hasNext()) { + String key = iterator.next(); + for (Setting whitelistedSetting : WHITE_LISTED_SETTINGS) { + if (whitelistedSetting.match(key)) { + iterator.remove(); + break; + } + } + } + return settings.build(); + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java index d58e4ce5045c1..8dd463ae43ab8 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java @@ -154,7 +154,7 @@ static ClusterState innerPut(PutAutoFollowPatternAction.Request request, request.getMaxOperationSizeInBytes(), request.getMaxConcurrentWriteBatches(), request.getMaxWriteBufferSize(), - request.getRetryTimeout(), + request.getMaxRetryDelay(), request.getIdleShardRetryDelay(), filteredHeaders); patterns.put(request.getLeaderClusterAlias(), autoFollowPattern); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowIndexAction.java new file mode 100644 index 0000000000000..05cde0eab8523 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowIndexAction.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.persistent.PersistentTasksService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.ccr.action.UnfollowIndexAction; + +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReferenceArray; + +public class TransportUnfollowIndexAction extends HandledTransportAction { + + private final Client client; + private final PersistentTasksService persistentTasksService; + + @Inject + public TransportUnfollowIndexAction( + final Settings settings, + final TransportService transportService, + final ActionFilters actionFilters, + final Client client, + final PersistentTasksService persistentTasksService) { + super(settings, UnfollowIndexAction.NAME, transportService, actionFilters, UnfollowIndexAction.Request::new); + this.client = client; + this.persistentTasksService = persistentTasksService; + } + + @Override + protected void doExecute( + final Task task, + final UnfollowIndexAction.Request request, + final ActionListener listener) { + + client.admin().cluster().state(new ClusterStateRequest(), ActionListener.wrap(r -> { + IndexMetaData followIndexMetadata = r.getState().getMetaData().index(request.getFollowIndex()); + if (followIndexMetadata == null) { + listener.onFailure(new IllegalArgumentException("follow index [" + request.getFollowIndex() + "] does not exist")); + return; + } + + final int numShards = followIndexMetadata.getNumberOfShards(); + final AtomicInteger counter = new AtomicInteger(numShards); + final AtomicReferenceArray responses = new AtomicReferenceArray<>(followIndexMetadata.getNumberOfShards()); + for (int i = 0; i < numShards; i++) { + final int shardId = i; + String taskId = followIndexMetadata.getIndexUUID() + "-" + shardId; + persistentTasksService.sendRemoveRequest(taskId, + new ActionListener>() { + @Override + public void onResponse(PersistentTasksCustomMetaData.PersistentTask task) { + responses.set(shardId, task); + finalizeResponse(); + } + + @Override + public void onFailure(Exception e) { + responses.set(shardId, e); + finalizeResponse(); + } + + void finalizeResponse() { + Exception error = null; + if (counter.decrementAndGet() == 0) { + for (int j = 0; j < responses.length(); j++) { + Object response = responses.get(j); + if (response instanceof Exception) { + if (error == null) { + error = (Exception) response; + } else { + error.addSuppressed((Throwable) response); + } + } + } + + if (error == null) { + // include task ids? 
+ listener.onResponse(new AcknowledgedResponse(true)); + } else { + // TODO: cancel all started tasks + listener.onFailure(error); + } + } + } + }); + } + }, listener::onFailure)); + } + +} \ No newline at end of file diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/UnfollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/UnfollowIndexAction.java deleted file mode 100644 index 93b2bcc3e4096..0000000000000 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/UnfollowIndexAction.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ccr.action; - -import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.persistent.PersistentTasksService; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.transport.TransportService; - -import java.io.IOException; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReferenceArray; - -public class UnfollowIndexAction extends Action { - - public static final UnfollowIndexAction INSTANCE = new UnfollowIndexAction(); - public static final String NAME = "cluster:admin/xpack/ccr/unfollow_index"; - - private UnfollowIndexAction() { - super(NAME); - } - - @Override - public AcknowledgedResponse newResponse() { - return new AcknowledgedResponse(); - } - - public static class Request extends ActionRequest { - - private String followIndex; - - public String getFollowIndex() { - return followIndex; - } - - public void setFollowIndex(String followIndex) { - this.followIndex = followIndex; - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - followIndex = in.readString(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(followIndex); - } - } - - public static class TransportAction extends HandledTransportAction { - - private final Client client; - private final PersistentTasksService persistentTasksService; - - @Inject - public TransportAction(Settings settings, - TransportService transportService, - ActionFilters actionFilters, - Client client, - PersistentTasksService persistentTasksService) { - super(settings, NAME, transportService, actionFilters, Request::new); - this.client = client; - this.persistentTasksService = persistentTasksService; - } - - @Override - 
protected void doExecute(Task task, - Request request, - ActionListener listener) { - - client.admin().cluster().state(new ClusterStateRequest(), ActionListener.wrap(r -> { - IndexMetaData followIndexMetadata = r.getState().getMetaData().index(request.followIndex); - if (followIndexMetadata == null) { - listener.onFailure(new IllegalArgumentException("follow index [" + request.followIndex + "] does not exist")); - return; - } - - final int numShards = followIndexMetadata.getNumberOfShards(); - final AtomicInteger counter = new AtomicInteger(numShards); - final AtomicReferenceArray responses = new AtomicReferenceArray<>(followIndexMetadata.getNumberOfShards()); - for (int i = 0; i < numShards; i++) { - final int shardId = i; - String taskId = followIndexMetadata.getIndexUUID() + "-" + shardId; - persistentTasksService.sendRemoveRequest(taskId, - new ActionListener>() { - @Override - public void onResponse(PersistentTasksCustomMetaData.PersistentTask task) { - responses.set(shardId, task); - finalizeResponse(); - } - - @Override - public void onFailure(Exception e) { - responses.set(shardId, e); - finalizeResponse(); - } - - void finalizeResponse() { - Exception error = null; - if (counter.decrementAndGet() == 0) { - for (int j = 0; j < responses.length(); j++) { - Object response = responses.get(j); - if (response instanceof Exception) { - if (error == null) { - error = (Exception) response; - } else { - error.addSuppressed((Throwable) response); - } - } - } - - if (error == null) { - // include task ids? - listener.onResponse(new AcknowledgedResponse(true)); - } else { - // TODO: cancel all started tasks - listener.onFailure(error); - } - } - } - }); - } - }, listener::onFailure)); - } - } - -} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java index df34fd6cd45b2..de285dba19ec2 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java @@ -14,7 +14,7 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.ccr.action.CcrStatsAction; +import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; import java.io.IOException; @@ -33,7 +33,7 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(final RestRequest restRequest, final NodeClient client) throws IOException { - final CcrStatsAction.TasksRequest request = new CcrStatsAction.TasksRequest(); + final CcrStatsAction.StatsRequest request = new CcrStatsAction.StatsRequest(); request.setIndices(Strings.splitStringByCommaToArray(restRequest.param("index"))); request.setIndicesOptions(IndicesOptions.fromRequest(restRequest, request.indicesOptions())); return channel -> client.execute(CcrStatsAction.INSTANCE, request, new RestToXContentListener<>(channel)); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCreateAndFollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCreateAndFollowIndexAction.java index 4d9079b36c943..8816760f52617 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCreateAndFollowIndexAction.java +++ 
b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCreateAndFollowIndexAction.java @@ -14,8 +14,8 @@ import java.io.IOException; -import static org.elasticsearch.xpack.ccr.action.CreateAndFollowIndexAction.INSTANCE; -import static org.elasticsearch.xpack.ccr.action.CreateAndFollowIndexAction.Request; +import static org.elasticsearch.xpack.core.ccr.action.CreateAndFollowIndexAction.INSTANCE; +import static org.elasticsearch.xpack.core.ccr.action.CreateAndFollowIndexAction.Request; public class RestCreateAndFollowIndexAction extends BaseRestHandler { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowIndexAction.java index 88f5b74f4b141..8a1d7d778bd86 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowIndexAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowIndexAction.java @@ -15,8 +15,8 @@ import java.io.IOException; -import static org.elasticsearch.xpack.ccr.action.FollowIndexAction.INSTANCE; -import static org.elasticsearch.xpack.ccr.action.FollowIndexAction.Request; +import static org.elasticsearch.xpack.core.ccr.action.FollowIndexAction.INSTANCE; +import static org.elasticsearch.xpack.core.ccr.action.FollowIndexAction.Request; public class RestFollowIndexAction extends BaseRestHandler { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestUnfollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestUnfollowIndexAction.java index 2df6c77379b24..9a82717b621bd 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestUnfollowIndexAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestUnfollowIndexAction.java @@ -14,8 +14,8 @@ import java.io.IOException; -import static org.elasticsearch.xpack.ccr.action.UnfollowIndexAction.INSTANCE; -import static org.elasticsearch.xpack.ccr.action.UnfollowIndexAction.Request; +import static org.elasticsearch.xpack.core.ccr.action.UnfollowIndexAction.INSTANCE; +import static org.elasticsearch.xpack.core.ccr.action.UnfollowIndexAction.Request; public class RestUnfollowIndexAction extends BaseRestHandler { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java index 06cafc4777a49..c7cf12ad76415 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java @@ -12,17 +12,22 @@ import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.xpack.ccr.action.AutoFollowCoordinator; -import org.elasticsearch.xpack.ccr.action.CcrStatsAction; -import org.elasticsearch.xpack.ccr.action.CreateAndFollowIndexAction; -import 
org.elasticsearch.xpack.ccr.action.FollowIndexAction; +import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; +import org.elasticsearch.xpack.core.ccr.action.CreateAndFollowIndexAction; +import org.elasticsearch.xpack.core.ccr.action.FollowIndexAction; import org.elasticsearch.xpack.ccr.action.PutAutoFollowPatternAction; -import org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask; +import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; +import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern; import java.util.Collection; import java.util.Collections; @@ -85,9 +90,9 @@ public void onFailure(final Exception e) { public void testThatCcrStatsAreUnavailableWithNonCompliantLicense() throws InterruptedException { final CountDownLatch latch = new CountDownLatch(1); - client().execute(CcrStatsAction.INSTANCE, new CcrStatsAction.TasksRequest(), new ActionListener() { + client().execute(CcrStatsAction.INSTANCE, new CcrStatsAction.StatsRequest(), new ActionListener() { @Override - public void onResponse(final CcrStatsAction.TasksResponse tasksResponse) { + public void onResponse(final CcrStatsAction.StatsResponses statsResponses) { latch.countDown(); fail(); } @@ -127,6 +132,40 @@ public void onFailure(final Exception e) { } public void testAutoFollowCoordinatorLogsSkippingAutoFollowCoordinationWithNonCompliantLicense() throws Exception { + // Update the cluster state so that we have auto follow patterns and verify that we log a warning in case of incompatible license: + CountDownLatch latch = new CountDownLatch(1); + ClusterService clusterService = getInstanceFromNode(ClusterService.class); + clusterService.submitStateUpdateTask("test-add-auto-follow-pattern", new ClusterStateUpdateTask() { + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + AutoFollowPattern autoFollowPattern = + new AutoFollowPattern(Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null, null); + AutoFollowMetadata autoFollowMetadata = new AutoFollowMetadata( + Collections.singletonMap("test_alias", autoFollowPattern), + Collections.emptyMap() + ); + + ClusterState.Builder newState = ClusterState.builder(currentState); + newState.metaData(MetaData.builder(currentState.getMetaData()) + .putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata) + .build()); + return newState.build(); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + } + + @Override + public void onFailure(String source, Exception e) { + latch.countDown(); + fail("unexpected error [" + e.getMessage() + "]"); + } + }); + latch.await(); + final Logger logger = LogManager.getLogger(AutoFollowCoordinator.class); final MockLogAppender appender = new MockLogAppender(); appender.start(); @@ -156,11 +195,11 @@ private FollowIndexAction.Request getFollowRequest() { return new FollowIndexAction.Request( "leader", "follower", - ShardFollowNodeTask.DEFAULT_MAX_BATCH_OPERATION_COUNT, - ShardFollowNodeTask.DEFAULT_MAX_CONCURRENT_READ_BATCHES, - ShardFollowNodeTask.DEFAULT_MAX_BATCH_SIZE_IN_BYTES, - ShardFollowNodeTask.DEFAULT_MAX_CONCURRENT_WRITE_BATCHES, - ShardFollowNodeTask.DEFAULT_MAX_WRITE_BUFFER_SIZE, + FollowIndexAction.DEFAULT_MAX_BATCH_OPERATION_COUNT, + FollowIndexAction.DEFAULT_MAX_CONCURRENT_READ_BATCHES, + FollowIndexAction.DEFAULT_MAX_BATCH_SIZE_IN_BYTES, + FollowIndexAction.DEFAULT_MAX_CONCURRENT_WRITE_BATCHES, + FollowIndexAction.DEFAULT_MAX_WRITE_BUFFER_SIZE, 
TimeValue.timeValueMillis(10), TimeValue.timeValueMillis(10)); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java index 7980e1281406a..c0919f25fe3ff 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java @@ -38,13 +38,13 @@ import org.elasticsearch.test.MockHttpTransport; import org.elasticsearch.test.discovery.TestZenDiscovery; import org.elasticsearch.test.junit.annotations.TestLogging; -import org.elasticsearch.xpack.ccr.action.CreateAndFollowIndexAction; -import org.elasticsearch.xpack.ccr.action.FollowIndexAction; +import org.elasticsearch.xpack.core.ccr.action.CreateAndFollowIndexAction; +import org.elasticsearch.xpack.core.ccr.action.FollowIndexAction; import org.elasticsearch.xpack.ccr.action.ShardChangesAction; -import org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask; import org.elasticsearch.xpack.ccr.action.ShardFollowTask; -import org.elasticsearch.xpack.ccr.action.UnfollowIndexAction; +import org.elasticsearch.xpack.core.ccr.action.UnfollowIndexAction; import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; import java.io.IOException; import java.util.Arrays; @@ -335,7 +335,7 @@ public void testFollowIndexAndCloseNode() throws Exception { final FollowIndexAction.Request followRequest = new FollowIndexAction.Request("index1", "index2", randomIntBetween(32, 2048), randomIntBetween(2, 10), Long.MAX_VALUE, randomIntBetween(2, 10), - ShardFollowNodeTask.DEFAULT_MAX_WRITE_BUFFER_SIZE, TimeValue.timeValueMillis(500), TimeValue.timeValueMillis(10)); + FollowIndexAction.DEFAULT_MAX_WRITE_BUFFER_SIZE, TimeValue.timeValueMillis(500), TimeValue.timeValueMillis(10)); client().execute(FollowIndexAction.INSTANCE, followRequest).get(); long maxNumDocsReplicated = Math.min(1000, randomLongBetween(followRequest.getMaxBatchOperationCount(), @@ -507,7 +507,7 @@ private CheckedRunnable assertTask(final int numberOfPrimaryShards, f } } assertThat(taskInfo, notNullValue()); - ShardFollowNodeTask.Status status = (ShardFollowNodeTask.Status) taskInfo.getStatus(); + ShardFollowNodeTaskStatus status = (ShardFollowNodeTaskStatus) taskInfo.getStatus(); assertThat(status, notNullValue()); assertThat("incorrect global checkpoint " + shardFollowTaskParams, status.followerGlobalCheckpoint(), @@ -665,9 +665,9 @@ private void assertSameDocCount(String index1, String index2) throws Exception { } public static FollowIndexAction.Request createFollowRequest(String leaderIndex, String followIndex) { - return new FollowIndexAction.Request(leaderIndex, followIndex, ShardFollowNodeTask.DEFAULT_MAX_BATCH_OPERATION_COUNT, - ShardFollowNodeTask.DEFAULT_MAX_CONCURRENT_READ_BATCHES, ShardFollowNodeTask.DEFAULT_MAX_BATCH_SIZE_IN_BYTES, - ShardFollowNodeTask.DEFAULT_MAX_CONCURRENT_WRITE_BATCHES, ShardFollowNodeTask.DEFAULT_MAX_WRITE_BUFFER_SIZE, + return new FollowIndexAction.Request(leaderIndex, followIndex, FollowIndexAction.DEFAULT_MAX_BATCH_OPERATION_COUNT, + FollowIndexAction.DEFAULT_MAX_CONCURRENT_READ_BATCHES, FollowIndexAction.DEFAULT_MAX_BATCH_SIZE_IN_BYTES, + FollowIndexAction.DEFAULT_MAX_CONCURRENT_WRITE_BATCHES, FollowIndexAction.DEFAULT_MAX_WRITE_BUFFER_SIZE, TimeValue.timeValueMillis(10), TimeValue.timeValueMillis(10)); } } diff --git 
a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java index 476f858986fd5..31af326250c3b 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.xpack.ccr.action.AutoFollowCoordinator.AutoFollower; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern; +import org.elasticsearch.xpack.core.ccr.action.FollowIndexAction; import java.util.ArrayList; import java.util.Collections; diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowTests.java index a4808e428feca..f4bd8a69e3f5d 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowTests.java @@ -131,7 +131,7 @@ public void testAutoFollowParameterAreDelegated() throws Exception { request.setMaxOperationSizeInBytes(randomNonNegativeLong()); } if (randomBoolean()) { - request.setRetryTimeout(TimeValue.timeValueMillis(500)); + request.setMaxRetryDelay(TimeValue.timeValueMillis(500)); } if (randomBoolean()) { request.setIdleShardRetryDelay(TimeValue.timeValueMillis(500)); @@ -162,8 +162,8 @@ public void testAutoFollowParameterAreDelegated() throws Exception { if (request.getMaxOperationSizeInBytes() != null) { assertThat(shardFollowTask.getMaxBatchSizeInBytes(), equalTo(request.getMaxOperationSizeInBytes())); } - if (request.getRetryTimeout() != null) { - assertThat(shardFollowTask.getRetryTimeout(), equalTo(request.getRetryTimeout())); + if (request.getMaxRetryDelay() != null) { + assertThat(shardFollowTask.getMaxRetryDelay(), equalTo(request.getMaxRetryDelay())); } if (request.getIdleShardRetryDelay() != null) { assertThat(shardFollowTask.getIdleShardRetryDelay(), equalTo(request.getIdleShardRetryDelay())); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexRequestTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexRequestTests.java index c68d18499658c..c751ca5f00082 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexRequestTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexRequestTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.ccr.action; import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ccr.action.CreateAndFollowIndexAction; public class CreateAndFollowIndexRequestTests extends AbstractStreamableTestCase { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexResponseTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexResponseTests.java index 11a518ef06757..44ac21055a778 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexResponseTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexResponseTests.java @@ -6,6 +6,7 @@ package 
org.elasticsearch.xpack.ccr.action; import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ccr.action.CreateAndFollowIndexAction; public class CreateAndFollowIndexResponseTests extends AbstractStreamableTestCase { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexRequestTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexRequestTests.java index 7202f7202c643..2017fa2fdb989 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexRequestTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexRequestTests.java @@ -8,6 +8,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ccr.action.FollowIndexAction; import java.io.IOException; diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternRequestTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternRequestTests.java index 27760578db945..d6dad3b019ca3 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternRequestTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutAutoFollowPatternRequestTests.java @@ -41,7 +41,7 @@ protected PutAutoFollowPatternAction.Request createTestInstance() { request.setIdleShardRetryDelay(TimeValue.timeValueMillis(500)); } if (randomBoolean()) { - request.setRetryTimeout(TimeValue.timeValueMillis(500)); + request.setMaxRetryDelay(TimeValue.timeValueMillis(500)); } if (randomBoolean()) { request.setMaxBatchOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java index 9bfd6b9d6ef42..dacb60372e634 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java @@ -15,6 +15,8 @@ import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsResponse; +import org.elasticsearch.xpack.core.ccr.action.FollowIndexAction; +import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; import java.nio.charset.StandardCharsets; import java.util.ArrayList; @@ -52,7 +54,7 @@ public void testMultipleReaderWriter() throws Exception { private void startAndAssertAndStopTask(ShardFollowNodeTask task, TestRun testRun) throws Exception { task.start(testRun.startSeqNo - 1, testRun.startSeqNo - 1, testRun.startSeqNo - 1, testRun.startSeqNo - 1); assertBusy(() -> { - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.leaderGlobalCheckpoint(), equalTo(testRun.finalExpectedGlobalCheckpoint)); assertThat(status.followerGlobalCheckpoint(), equalTo(testRun.finalExpectedGlobalCheckpoint)); final long numberOfFailedFetches = @@ -65,7 +67,7 @@ private void startAndAssertAndStopTask(ShardFollowNodeTask task, TestRun testRun task.markAsCompleted(); assertBusy(() -> { - 
ShardFollowNodeTask.Status status = task.getStatus();
+            ShardFollowNodeTaskStatus status = task.getStatus();
             assertThat(status.numberOfConcurrentReads(), equalTo(0));
             assertThat(status.numberOfConcurrentWrites(), equalTo(0));
         });
@@ -75,7 +77,7 @@ private ShardFollowNodeTask createShardFollowTask(int concurrency, TestRun testR
         AtomicBoolean stopped = new AtomicBoolean(false);
         ShardFollowTask params = new ShardFollowTask(null, new ShardId("follow_index", "", 0),
             new ShardId("leader_index", "", 0), testRun.maxOperationCount, concurrency,
-            ShardFollowNodeTask.DEFAULT_MAX_BATCH_SIZE_IN_BYTES, concurrency, 10240,
+            FollowIndexAction.DEFAULT_MAX_BATCH_SIZE_IN_BYTES, concurrency, 10240,
             TimeValue.timeValueMillis(10), TimeValue.timeValueMillis(10), Collections.emptyMap());

         ThreadPool threadPool = new TestThreadPool(getClass().getSimpleName());
diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskStatusTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskStatusTests.java
index 8368a818e006e..2f145e7a98c9f 100644
--- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskStatusTests.java
+++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskStatusTests.java
@@ -10,6 +10,7 @@
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.test.AbstractSerializingTestCase;
+import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus;

 import java.io.IOException;
 import java.util.Map;
@@ -21,17 +22,17 @@
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.instanceOf;

-public class ShardFollowNodeTaskStatusTests extends AbstractSerializingTestCase<ShardFollowNodeTask.Status> {
+public class ShardFollowNodeTaskStatusTests extends AbstractSerializingTestCase<ShardFollowNodeTaskStatus> {

     @Override
-    protected ShardFollowNodeTask.Status doParseInstance(XContentParser parser) throws IOException {
-        return ShardFollowNodeTask.Status.fromXContent(parser);
+    protected ShardFollowNodeTaskStatus doParseInstance(XContentParser parser) throws IOException {
+        return ShardFollowNodeTaskStatus.fromXContent(parser);
     }

     @Override
-    protected ShardFollowNodeTask.Status createTestInstance() {
+    protected ShardFollowNodeTaskStatus createTestInstance() {
         // if you change this constructor, reflect the changes in the hand-written assertions below
-        return new ShardFollowNodeTask.Status(
+        return new ShardFollowNodeTaskStatus(
             randomAlphaOfLength(4),
             randomInt(),
             randomNonNegativeLong(),
@@ -57,7 +58,7 @@ protected ShardFollowNodeTask.Status createTestInstance() {
     }

     @Override
-    protected void assertEqualInstances(final ShardFollowNodeTask.Status expectedInstance, final ShardFollowNodeTask.Status newInstance) {
+    protected void assertEqualInstances(final ShardFollowNodeTaskStatus expectedInstance, final ShardFollowNodeTaskStatus newInstance) {
         assertNotSame(expectedInstance, newInstance);
         assertThat(newInstance.leaderIndex(), equalTo(expectedInstance.leaderIndex()));
         assertThat(newInstance.getShardId(), equalTo(expectedInstance.getShardId()));
@@ -108,8 +109,8 @@ private NavigableMap<Long, ElasticsearchException> randomReadExceptions() {
     }

     @Override
-    protected Writeable.Reader<ShardFollowNodeTask.Status> instanceReader() {
-        return ShardFollowNodeTask.Status::new;
+    protected Writeable.Reader<ShardFollowNodeTaskStatus> instanceReader() {
+        return ShardFollowNodeTaskStatus::new;
     }

 }
diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java
b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java index 4f7c0bf16645c..e25d95538b2f5 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.index.translog.Translog; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsResponse; +import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; import java.net.ConnectException; import java.nio.charset.StandardCharsets; @@ -29,12 +30,13 @@ import java.util.function.Consumer; import java.util.function.LongConsumer; +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.contains; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.sameInstance; public class ShardFollowNodeTaskTests extends ESTestCase { @@ -44,7 +46,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase { private List> bulkShardOperationRequests; private BiConsumer scheduler = (delay, task) -> task.run(); - private Consumer beforeSendShardChangesRequest = status -> {}; + private Consumer beforeSendShardChangesRequest = status -> {}; private AtomicBoolean simulateResponse = new AtomicBoolean(); @@ -66,7 +68,7 @@ public void testCoordinateReads() { assertThat(shardChangesRequests, contains(new long[][]{ {6L, 8L}, {14L, 8L}, {22L, 8L}, {30L, 8L}, {38L, 8L}, {46L, 8L}, {54L, 7L}} )); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(7)); assertThat(status.lastRequestedSeqNo(), equalTo(60L)); } @@ -86,7 +88,7 @@ public void testWriteBuffer() { task.innerHandleReadResponse(0L, 63L, generateShardChangesResponse(0, 63, 0L, 128L)); assertThat(shardChangesRequests.size(), equalTo(0)); // no more reads, because write buffer is full - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(0)); assertThat(status.numberOfConcurrentWrites(), equalTo(0)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); @@ -102,7 +104,7 @@ public void testMaxConcurrentReads() { assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); assertThat(shardChangesRequests.get(0)[1], equalTo(8L)); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.lastRequestedSeqNo(), equalTo(7L)); } @@ -140,7 +142,7 @@ public void testTaskCancelledAfterReadLimitHasBeenReached() { assertThat(shardChangesRequests.size(), equalTo(0)); // no more reads, because task has been cancelled assertThat(bulkShardOperationRequests.size(), equalTo(0)); // no more writes, because task has been cancelled - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(0)); assertThat(status.numberOfConcurrentWrites(), 
equalTo(0)); assertThat(status.lastRequestedSeqNo(), equalTo(15L)); @@ -164,7 +166,7 @@ public void testTaskCancelledAfterWriteBufferLimitHasBeenReached() { assertThat(shardChangesRequests.size(), equalTo(0)); // no more reads, because task has been cancelled assertThat(bulkShardOperationRequests.size(), equalTo(0)); // no more writes, because task has been cancelled - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(0)); assertThat(status.numberOfConcurrentWrites(), equalTo(0)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); @@ -176,7 +178,7 @@ public void testReceiveRetryableError() { ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); startTask(task, 63, -1); - int max = randomIntBetween(1, 10); + int max = randomIntBetween(1, 30); for (int i = 0; i < max; i++) { readFailures.add(new ShardNotFoundException(new ShardId("leader_index", "", 0))); } @@ -211,7 +213,7 @@ public void testReceiveRetryableError() { } assertFalse("task is not stopped", task.isStopped()); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.numberOfConcurrentWrites(), equalTo(0)); assertThat(status.numberOfFailedFetches(), equalTo((long)max)); @@ -222,59 +224,6 @@ public void testReceiveRetryableError() { assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); } - public void testReceiveRetryableErrorRetriedTooManyTimes() { - ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); - startTask(task, 63, -1); - - int max = randomIntBetween(11, 32); - for (int i = 0; i < max; i++) { - readFailures.add(new ShardNotFoundException(new ShardId("leader_index", "", 0))); - } - final AtomicLong retryCounter = new AtomicLong(); - // before each retry, we assert the fetch failures; after the last retry, the fetch failure should persist - beforeSendShardChangesRequest = status -> { - assertThat(status.numberOfFailedFetches(), equalTo(retryCounter.get())); - if (retryCounter.get() > 0) { - assertThat(status.fetchExceptions().entrySet(), hasSize(1)); - final Map.Entry entry = status.fetchExceptions().entrySet().iterator().next(); - assertThat(entry.getKey(), equalTo(0L)); - assertThat(entry.getValue(), instanceOf(ElasticsearchException.class)); - assertNotNull(entry.getValue().getCause()); - assertThat(entry.getValue().getCause(), instanceOf(ShardNotFoundException.class)); - final ShardNotFoundException cause = (ShardNotFoundException) entry.getValue().getCause(); - assertThat(cause.getShardId().getIndexName(), equalTo("leader_index")); - assertThat(cause.getShardId().getId(), equalTo(0)); - } - retryCounter.incrementAndGet(); - }; - task.coordinateReads(); - - assertThat(shardChangesRequests.size(), equalTo(11)); - for (long[] shardChangesRequest : shardChangesRequests) { - assertThat(shardChangesRequest[0], equalTo(0L)); - assertThat(shardChangesRequest[1], equalTo(64L)); - } - - assertTrue("task is stopped", task.isStopped()); - assertThat(fatalError, notNullValue()); - assertThat(fatalError.getMessage(), containsString("retrying failed [")); - ShardFollowNodeTask.Status status = task.getStatus(); - assertThat(status.numberOfConcurrentReads(), equalTo(1)); - assertThat(status.numberOfConcurrentWrites(), equalTo(0)); - assertThat(status.numberOfFailedFetches(), equalTo(11L)); - 
assertThat(status.fetchExceptions().entrySet(), hasSize(1)); - final Map.Entry entry = status.fetchExceptions().entrySet().iterator().next(); - assertThat(entry.getKey(), equalTo(0L)); - assertThat(entry.getValue(), instanceOf(ElasticsearchException.class)); - assertNotNull(entry.getValue().getCause()); - assertThat(entry.getValue().getCause(), instanceOf(ShardNotFoundException.class)); - final ShardNotFoundException cause = (ShardNotFoundException) entry.getValue().getCause(); - assertThat(cause.getShardId().getIndexName(), equalTo("leader_index")); - assertThat(cause.getShardId().getId(), equalTo(0)); - assertThat(status.lastRequestedSeqNo(), equalTo(63L)); - assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); - } - public void testReceiveNonRetryableError() { ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); startTask(task, 63, -1); @@ -299,7 +248,7 @@ public void testReceiveNonRetryableError() { assertTrue("task is stopped", task.isStopped()); assertThat(fatalError, sameInstance(failure)); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.numberOfConcurrentWrites(), equalTo(0)); assertThat(status.numberOfFailedFetches(), equalTo(1L)); @@ -326,7 +275,7 @@ public void testHandleReadResponse() { assertThat(bulkShardOperationRequests.size(), equalTo(1)); assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations()))); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.mappingVersion(), equalTo(0L)); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.numberOfConcurrentReads(), equalTo(1)); @@ -353,7 +302,7 @@ public void testReceiveLessThanRequested() { assertThat(shardChangesRequests.get(0)[0], equalTo(21L)); assertThat(shardChangesRequests.get(0)[1], equalTo(43L)); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.numberOfConcurrentWrites(), equalTo(1)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); @@ -376,7 +325,7 @@ public void testCancelAndReceiveLessThanRequested() { assertThat(shardChangesRequests.size(), equalTo(0)); assertThat(bulkShardOperationRequests.size(), equalTo(0)); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(0)); assertThat(status.numberOfConcurrentWrites(), equalTo(0)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); @@ -399,7 +348,7 @@ public void testReceiveNothingExpectedSomething() { assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.numberOfConcurrentWrites(), equalTo(0)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); @@ -441,7 +390,7 @@ public void testMappingUpdate() { assertThat(bulkShardOperationRequests.size(), equalTo(1)); assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations()))); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = 
task.getStatus(); assertThat(status.mappingVersion(), equalTo(1L)); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.numberOfConcurrentWrites(), equalTo(1)); @@ -454,7 +403,7 @@ public void testMappingUpdateRetryableError() { ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); startTask(task, 63, -1); - int max = randomIntBetween(1, 10); + int max = randomIntBetween(1, 30); for (int i = 0; i < max; i++) { mappingUpdateFailures.add(new ConnectException()); } @@ -466,7 +415,7 @@ public void testMappingUpdateRetryableError() { assertThat(mappingUpdateFailures.size(), equalTo(0)); assertThat(bulkShardOperationRequests.size(), equalTo(1)); assertThat(task.isStopped(), equalTo(false)); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.mappingVersion(), equalTo(1L)); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.numberOfConcurrentWrites(), equalTo(1)); @@ -475,31 +424,6 @@ public void testMappingUpdateRetryableError() { } - public void testMappingUpdateRetryableErrorRetriedTooManyTimes() { - ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); - startTask(task, 63, -1); - - int max = randomIntBetween(11, 20); - for (int i = 0; i < max; i++) { - mappingUpdateFailures.add(new ConnectException()); - } - mappingVersions.add(1L); - task.coordinateReads(); - ShardChangesAction.Response response = generateShardChangesResponse(0, 64, 1L, 64L); - task.handleReadResponse(0L, 64L, response); - - assertThat(mappingUpdateFailures.size(), equalTo(max - 11)); - assertThat(mappingVersions.size(), equalTo(1)); - assertThat(bulkShardOperationRequests.size(), equalTo(0)); - assertThat(task.isStopped(), equalTo(true)); - ShardFollowNodeTask.Status status = task.getStatus(); - assertThat(status.mappingVersion(), equalTo(0L)); - assertThat(status.numberOfConcurrentReads(), equalTo(1)); - assertThat(status.numberOfConcurrentWrites(), equalTo(0)); - assertThat(status.lastRequestedSeqNo(), equalTo(63L)); - assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); - } - public void testMappingUpdateNonRetryableError() { ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); startTask(task, 63, -1); @@ -511,7 +435,7 @@ public void testMappingUpdateNonRetryableError() { assertThat(bulkShardOperationRequests.size(), equalTo(0)); assertThat(task.isStopped(), equalTo(true)); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.mappingVersion(), equalTo(0L)); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.numberOfConcurrentWrites(), equalTo(0)); @@ -535,7 +459,7 @@ public void testCoordinateWrites() { assertThat(bulkShardOperationRequests.size(), equalTo(1)); assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations()))); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.numberOfConcurrentWrites(), equalTo(1)); assertThat(status.lastRequestedSeqNo(), equalTo(63L)); @@ -553,7 +477,7 @@ public void testMaxConcurrentWrites() { assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations()).subList(0, 64))); assertThat(bulkShardOperationRequests.get(1), 
equalTo(Arrays.asList(response.getOperations()).subList(64, 128))); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentWrites(), equalTo(2)); task = createShardFollowTask(64, 1, 4, Integer.MAX_VALUE, Long.MAX_VALUE); @@ -583,7 +507,7 @@ public void testMaxBatchOperationCount() { assertThat(bulkShardOperationRequests.get(i), equalTo(Arrays.asList(response.getOperations()).subList(offset, offset + 8))); } - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentWrites(), equalTo(32)); } @@ -596,7 +520,7 @@ public void testRetryableError() { assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); - int max = randomIntBetween(1, 10); + int max = randomIntBetween(1, 30); for (int i = 0; i < max; i++) { writeFailures.add(new ShardNotFoundException(new ShardId("leader_index", "", 0))); } @@ -610,35 +534,7 @@ public void testRetryableError() { assertThat(operations, equalTo(Arrays.asList(response.getOperations()))); } assertThat(task.isStopped(), equalTo(false)); - ShardFollowNodeTask.Status status = task.getStatus(); - assertThat(status.numberOfConcurrentWrites(), equalTo(1)); - assertThat(status.followerGlobalCheckpoint(), equalTo(-1L)); - } - - public void testRetryableErrorRetriedTooManyTimes() { - ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); - startTask(task, 63, -1); - - task.coordinateReads(); - assertThat(shardChangesRequests.size(), equalTo(1)); - assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); - assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); - - int max = randomIntBetween(11, 32); - for (int i = 0; i < max; i++) { - writeFailures.add(new ShardNotFoundException(new ShardId("leader_index", "", 0))); - } - ShardChangesAction.Response response = generateShardChangesResponse(0, 63, 0L, 643); - // Also invokes coordinatesWrites() - task.innerHandleReadResponse(0L, 63L, response); - - // Number of requests is equal to initial request + retried attempts: - assertThat(bulkShardOperationRequests.size(), equalTo(11)); - for (List operations : bulkShardOperationRequests) { - assertThat(operations, equalTo(Arrays.asList(response.getOperations()))); - } - assertThat(task.isStopped(), equalTo(true)); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentWrites(), equalTo(1)); assertThat(status.followerGlobalCheckpoint(), equalTo(-1L)); } @@ -660,7 +556,7 @@ public void testNonRetryableError() { assertThat(bulkShardOperationRequests.size(), equalTo(1)); assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations()))); assertThat(task.isStopped(), equalTo(true)); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentWrites(), equalTo(1)); assertThat(status.followerGlobalCheckpoint(), equalTo(-1L)); } @@ -704,15 +600,32 @@ public void testHandleWriteResponse() { assertThat(shardChangesRequests.get(0)[0], equalTo(64L)); assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); - ShardFollowNodeTask.Status status = task.getStatus(); + ShardFollowNodeTaskStatus status = task.getStatus(); assertThat(status.numberOfConcurrentReads(), equalTo(1)); assertThat(status.lastRequestedSeqNo(), 
equalTo(63L)); assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); assertThat(status.followerGlobalCheckpoint(), equalTo(63L)); } - ShardFollowNodeTask createShardFollowTask(int maxBatchOperationCount, int maxConcurrentReadBatches, int maxConcurrentWriteBatches, - int bufferWriteLimit, long maxBatchSizeInBytes) { + public void testComputeDelay() { + long maxDelayInMillis = 1000; + assertThat(ShardFollowNodeTask.computeDelay(0, maxDelayInMillis), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(50L))); + assertThat(ShardFollowNodeTask.computeDelay(1, maxDelayInMillis), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(50L))); + assertThat(ShardFollowNodeTask.computeDelay(2, maxDelayInMillis), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(100L))); + assertThat(ShardFollowNodeTask.computeDelay(3, maxDelayInMillis), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(200L))); + assertThat(ShardFollowNodeTask.computeDelay(4, maxDelayInMillis), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(400L))); + assertThat(ShardFollowNodeTask.computeDelay(5, maxDelayInMillis), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(800L))); + assertThat(ShardFollowNodeTask.computeDelay(6, maxDelayInMillis), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(1000L))); + assertThat(ShardFollowNodeTask.computeDelay(7, maxDelayInMillis), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(1000L))); + assertThat(ShardFollowNodeTask.computeDelay(8, maxDelayInMillis), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(1000L))); + assertThat(ShardFollowNodeTask.computeDelay(1024, maxDelayInMillis), allOf(greaterThanOrEqualTo(0L), lessThanOrEqualTo(1000L))); + } + + private ShardFollowNodeTask createShardFollowTask(int maxBatchOperationCount, + int maxConcurrentReadBatches, + int maxConcurrentWriteBatches, + int bufferWriteLimit, + long maxBatchSizeInBytes) { AtomicBoolean stopped = new AtomicBoolean(false); ShardFollowTask params = new ShardFollowTask(null, new ShardId("follow_index", "", 0), new ShardId("leader_index", "", 0), maxBatchOperationCount, maxConcurrentReadBatches, maxBatchSizeInBytes, diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowIndexActionTests.java similarity index 89% rename from x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexActionTests.java rename to x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowIndexActionTests.java index 5b52700f5579b..7691945643d57 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowIndexActionTests.java @@ -3,6 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
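Note on the retry behavior change above: removing testReceiveRetryableErrorRetriedTooManyTimes (and its mapping-update and write counterparts) and widening randomIntBetween(1, 10) to randomIntBetween(1, 30) means retryable errors are no longer fatal after a fixed number of attempts. The bounds asserted in the new testComputeDelay are consistent with a full-jitter exponential backoff whose ceiling starts at 50 ms, doubles per retry, and is capped at the configured maximum. A minimal sketch that satisfies those bounds follows; it is an illustration, not the actual ShardFollowNodeTask.computeDelay implementation:

    import java.util.concurrent.ThreadLocalRandom;

    final class BackoffSketch {
        // Ceiling = min(50ms * 2^(retry - 1), maxDelayInMillis); delay is uniform in [0, ceiling].
        static long computeDelay(int currentRetry, long maxDelayInMillis) {
            int exponent = Math.max(0, Math.min(currentRetry - 1, 24)); // clamp the shift to avoid overflow
            long ceiling = Math.min(50L << exponent, maxDelayInMillis);
            return ThreadLocalRandom.current().nextLong(ceiling + 1);
        }
    }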
*/ + package org.elasticsearch.xpack.ccr.action; import org.elasticsearch.Version; @@ -15,32 +16,33 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.ccr.CcrSettings; import org.elasticsearch.xpack.ccr.ShardChangesIT; +import org.elasticsearch.xpack.core.ccr.action.FollowIndexAction; import java.io.IOException; +import static org.elasticsearch.xpack.ccr.action.TransportFollowIndexAction.validate; import static org.hamcrest.Matchers.equalTo; -public class FollowIndexActionTests extends ESTestCase { +public class TransportFollowIndexActionTests extends ESTestCase { public void testValidation() throws IOException { FollowIndexAction.Request request = ShardChangesIT.createFollowRequest("index1", "index2"); { // should fail, because leader index does not exist - Exception e = expectThrows(IllegalArgumentException.class, () -> FollowIndexAction.validate(request, null, null, null)); + Exception e = expectThrows(IllegalArgumentException.class, () -> validate(request, null, null, null)); assertThat(e.getMessage(), equalTo("leader index [index1] does not exist")); } { // should fail, because follow index does not exist IndexMetaData leaderIMD = createIMD("index1", 5, Settings.EMPTY); - Exception e = expectThrows(IllegalArgumentException.class, () -> FollowIndexAction.validate(request, leaderIMD, null, null)); + Exception e = expectThrows(IllegalArgumentException.class, () -> validate(request, leaderIMD, null, null)); assertThat(e.getMessage(), equalTo("follow index [index2] does not exist")); } { // should fail because leader index does not have soft deletes enabled IndexMetaData leaderIMD = createIMD("index1", 5, Settings.EMPTY); IndexMetaData followIMD = createIMD("index2", 5, Settings.EMPTY); - Exception e = expectThrows(IllegalArgumentException.class, - () -> FollowIndexAction.validate(request, leaderIMD, followIMD, null)); + Exception e = expectThrows(IllegalArgumentException.class, () -> validate(request, leaderIMD, followIMD, null)); assertThat(e.getMessage(), equalTo("leader index [index1] does not have soft deletes enabled")); } { @@ -48,8 +50,7 @@ public void testValidation() throws IOException { IndexMetaData leaderIMD = createIMD("index1", 5, Settings.builder() .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build()); IndexMetaData followIMD = createIMD("index2", 4, Settings.EMPTY); - Exception e = expectThrows(IllegalArgumentException.class, - () -> FollowIndexAction.validate(request, leaderIMD, followIMD, null)); + Exception e = expectThrows(IllegalArgumentException.class, () -> validate(request, leaderIMD, followIMD, null)); assertThat(e.getMessage(), equalTo("leader index primary shards [5] does not match with the number of shards of the follow index [4]")); } @@ -59,8 +60,7 @@ public void testValidation() throws IOException { .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build()); IndexMetaData followIMD = createIMD("index2", State.OPEN, "{}", 5, Settings.builder() .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build()); - Exception e = expectThrows(IllegalArgumentException.class, - () -> FollowIndexAction.validate(request, leaderIMD, followIMD, null)); + Exception e = expectThrows(IllegalArgumentException.class, () -> validate(request, leaderIMD, followIMD, null)); assertThat(e.getMessage(), equalTo("leader and follow index must be open")); } { @@ -71,8 +71,7 @@ public void testValidation() throws IOException { Settings.builder().put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true).build()); 
MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), Settings.EMPTY, "index2"); mapperService.updateMapping(null, followIMD); - Exception e = expectThrows(IllegalArgumentException.class, - () -> FollowIndexAction.validate(request, leaderIMD, followIMD, mapperService)); + Exception e = expectThrows(IllegalArgumentException.class, () -> validate(request, leaderIMD, followIMD, mapperService)); assertThat(e.getMessage(), equalTo("mapper [field] of different type, current_type [text], merged_type [keyword]")); } { @@ -86,8 +85,7 @@ public void testValidation() throws IOException { .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true) .put("index.analysis.analyzer.my_analyzer.type", "custom") .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build()); - Exception e = expectThrows(IllegalArgumentException.class, - () -> FollowIndexAction.validate(request, leaderIMD, followIMD, null)); + Exception e = expectThrows(IllegalArgumentException.class, () -> validate(request, leaderIMD, followIMD, null)); assertThat(e.getMessage(), equalTo("the leader and follower index settings must be identical")); } { @@ -100,8 +98,8 @@ public void testValidation() throws IOException { MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), followingIndexSettings, "index2"); mapperService.updateMapping(null, followIMD); - IllegalArgumentException error = expectThrows(IllegalArgumentException.class, - () -> FollowIndexAction.validate(request, leaderIMD, followIMD, mapperService)); + IllegalArgumentException error = + expectThrows(IllegalArgumentException.class, () -> validate(request, leaderIMD, followIMD, mapperService)); assertThat(error.getMessage(), equalTo("the following index [index2] is not ready to follow; " + "the setting [index.xpack.ccr.following_index] must be enabled.")); } @@ -113,7 +111,7 @@ public void testValidation() throws IOException { .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true).build()); MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), Settings.EMPTY, "index2"); mapperService.updateMapping(null, followIMD); - FollowIndexAction.validate(request, leaderIMD, followIMD, mapperService); + validate(request, leaderIMD, followIMD, mapperService); } { // should succeed, index settings are identical @@ -129,7 +127,7 @@ public void testValidation() throws IOException { MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), followIMD.getSettings(), "index2"); mapperService.updateMapping(null, followIMD); - FollowIndexAction.validate(request, leaderIMD, followIMD, mapperService); + validate(request, leaderIMD, followIMD, mapperService); } { // should succeed despite whitelisted settings being different @@ -147,7 +145,7 @@ public void testValidation() throws IOException { MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), followIMD.getSettings(), "index2"); mapperService.updateMapping(null, followIMD); - FollowIndexAction.validate(request, leaderIMD, followIMD, mapperService); + validate(request, leaderIMD, followIMD, mapperService); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java index 77f511ba4d0ef..3f27f66b27b77 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java @@ -13,6 +13,7 @@ import org.elasticsearch.protocol.xpack.XPackInfoResponse; import org.elasticsearch.xpack.core.action.XPackInfoAction; import org.elasticsearch.xpack.core.action.XPackInfoRequestBuilder; +import org.elasticsearch.xpack.core.ccr.client.CcrClient; import org.elasticsearch.xpack.core.ml.client.MachineLearningClient; import org.elasticsearch.xpack.core.monitoring.client.MonitoringClient; import org.elasticsearch.xpack.core.security.client.SecurityClient; @@ -20,6 +21,7 @@ import java.util.Collections; import java.util.Map; +import java.util.Objects; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; @@ -28,6 +30,7 @@ public class XPackClient { private final Client client; + private final CcrClient ccrClient; private final LicensingClient licensingClient; private final MonitoringClient monitoringClient; private final SecurityClient securityClient; @@ -35,7 +38,8 @@ public class XPackClient { private final MachineLearningClient machineLearning; public XPackClient(Client client) { - this.client = client; + this.client = Objects.requireNonNull(client, "client"); + this.ccrClient = new CcrClient(client); this.licensingClient = new LicensingClient(client); this.monitoringClient = new MonitoringClient(client); this.securityClient = new SecurityClient(client); @@ -47,6 +51,10 @@ public Client es() { return client; } + public CcrClient ccr() { + return ccrClient; + } + public LicensingClient licensing() { return licensingClient; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java index fb4ce0b90f4a4..997f04e33bd77 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java @@ -35,6 +35,12 @@ private XPackSettings() { throw new IllegalStateException("Utility class should not be instantiated"); } + + /** + * Setting for controlling whether or not CCR is enabled. + */ + public static final Setting CCR_ENABLED_SETTING = Setting.boolSetting("xpack.ccr.enabled", true, Property.NodeScope); + /** Setting for enabling or disabling security. Defaults to true. */ public static final Setting SECURITY_ENABLED = Setting.boolSetting("xpack.security.enabled", true, Setting.Property.NodeScope); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/ShardFollowNodeTaskStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/ShardFollowNodeTaskStatus.java new file mode 100644 index 0000000000000..2f3c4efb9ad3b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/ShardFollowNodeTaskStatus.java @@ -0,0 +1,509 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
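The new ccr() accessor on XPackClient gives the same per-feature client access that licensing(), monitoring(), and the others already provide. CcrClient itself is not part of this hunk, so the stats(...) call and its signature below are assumptions inferred from the CcrStatsAction classes later in this diff; read this as a usage sketch, not the confirmed API:

    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.common.Strings;
    import org.elasticsearch.xpack.core.XPackClient;
    import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction;

    final class CcrStatsExample {
        // Fetch and print per-shard follow statistics through the x-pack client facade.
        static void printCcrStats(Client client) {
            XPackClient xPackClient = new XPackClient(client);
            CcrStatsAction.StatsRequest request = new CcrStatsAction.StatsRequest();
            xPackClient.ccr().stats(request, ActionListener.wrap(
                    responses -> responses.getStatsResponses().forEach(
                            r -> System.out.println(r.followerShardId() + " => " + Strings.toString(r.status()))),
                    e -> { throw new RuntimeException(e); }));
        }
    }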
+ */
+
+package org.elasticsearch.xpack.core.ccr;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.tasks.Task;
+
+import java.io.IOException;
+import java.util.AbstractMap;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.Objects;
+import java.util.TreeMap;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+
+public class ShardFollowNodeTaskStatus implements Task.Status {
+
+    public static final String STATUS_PARSER_NAME = "shard-follow-node-task-status";
+
+    private static final ParseField LEADER_INDEX = new ParseField("leader_index");
+    private static final ParseField SHARD_ID = new ParseField("shard_id");
+    private static final ParseField LEADER_GLOBAL_CHECKPOINT_FIELD = new ParseField("leader_global_checkpoint");
+    private static final ParseField LEADER_MAX_SEQ_NO_FIELD = new ParseField("leader_max_seq_no");
+    private static final ParseField FOLLOWER_GLOBAL_CHECKPOINT_FIELD = new ParseField("follower_global_checkpoint");
+    private static final ParseField FOLLOWER_MAX_SEQ_NO_FIELD = new ParseField("follower_max_seq_no");
+    private static final ParseField LAST_REQUESTED_SEQ_NO_FIELD = new ParseField("last_requested_seq_no");
+    private static final ParseField NUMBER_OF_CONCURRENT_READS_FIELD = new ParseField("number_of_concurrent_reads");
+    private static final ParseField NUMBER_OF_CONCURRENT_WRITES_FIELD = new ParseField("number_of_concurrent_writes");
+    private static final ParseField NUMBER_OF_QUEUED_WRITES_FIELD = new ParseField("number_of_queued_writes");
+    private static final ParseField MAPPING_VERSION_FIELD = new ParseField("mapping_version");
+    private static final ParseField TOTAL_FETCH_TIME_MILLIS_FIELD = new ParseField("total_fetch_time_millis");
+    private static final ParseField NUMBER_OF_SUCCESSFUL_FETCHES_FIELD = new ParseField("number_of_successful_fetches");
+    private static final ParseField NUMBER_OF_FAILED_FETCHES_FIELD = new ParseField("number_of_failed_fetches");
+    private static final ParseField OPERATIONS_RECEIVED_FIELD = new ParseField("operations_received");
+    private static final ParseField TOTAL_TRANSFERRED_BYTES = new ParseField("total_transferred_bytes");
+    private static final ParseField TOTAL_INDEX_TIME_MILLIS_FIELD = new ParseField("total_index_time_millis");
+    private static final ParseField NUMBER_OF_SUCCESSFUL_BULK_OPERATIONS_FIELD = new ParseField("number_of_successful_bulk_operations");
+    private static final ParseField NUMBER_OF_FAILED_BULK_OPERATIONS_FIELD = new ParseField("number_of_failed_bulk_operations");
+    private static final ParseField NUMBER_OF_OPERATIONS_INDEXED_FIELD = new ParseField("number_of_operations_indexed");
+    private static final ParseField FETCH_EXCEPTIONS = new ParseField("fetch_exceptions");
+    private static final ParseField TIME_SINCE_LAST_FETCH_MILLIS_FIELD = new ParseField("time_since_last_fetch_millis");
+
+    @SuppressWarnings("unchecked")
+    static final ConstructingObjectParser<ShardFollowNodeTaskStatus, Void> STATUS_PARSER =
+            new ConstructingObjectParser<>(
+                    STATUS_PARSER_NAME,
+                    args -> new ShardFollowNodeTaskStatus(
+                            (String) args[0],
+                            (int) args[1],
+                            (long) args[2],
+                            (long) args[3],
+                            (long) args[4],
+                            (long) args[5],
+                            (long) args[6],
+                            (int) args[7],
+                            (int) args[8],
+                            (int) args[9],
+                            (long) args[10],
+                            (long) args[11],
+                            (long) args[12],
+                            (long) args[13],
+                            (long) args[14],
+                            (long) args[15],
+                            (long) args[16],
+                            (long) args[17],
+                            (long) args[18],
+                            (long) args[19],
+                            new TreeMap<>(
+                                    ((List<Map.Entry<Long, ElasticsearchException>>) args[20])
+                                            .stream()
+                                            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))),
+                            (long) args[21]));
+
+    public static final String FETCH_EXCEPTIONS_ENTRY_PARSER_NAME = "shard-follow-node-task-status-fetch-exceptions-entry";
+
+    static final ConstructingObjectParser<Map.Entry<Long, ElasticsearchException>, Void> FETCH_EXCEPTIONS_ENTRY_PARSER =
+            new ConstructingObjectParser<>(
+                    FETCH_EXCEPTIONS_ENTRY_PARSER_NAME,
+                    args -> new AbstractMap.SimpleEntry<>((long) args[0], (ElasticsearchException) args[1]));
+
+    static {
+        STATUS_PARSER.declareString(ConstructingObjectParser.constructorArg(), LEADER_INDEX);
+        STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), SHARD_ID);
+        STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), LEADER_GLOBAL_CHECKPOINT_FIELD);
+        STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), LEADER_MAX_SEQ_NO_FIELD);
+        STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), FOLLOWER_GLOBAL_CHECKPOINT_FIELD);
+        STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), FOLLOWER_MAX_SEQ_NO_FIELD);
+        STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), LAST_REQUESTED_SEQ_NO_FIELD);
+        STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_CONCURRENT_READS_FIELD);
+        STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_CONCURRENT_WRITES_FIELD);
+        STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_QUEUED_WRITES_FIELD);
+        STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), MAPPING_VERSION_FIELD);
+        STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_FETCH_TIME_MILLIS_FIELD);
+        STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_SUCCESSFUL_FETCHES_FIELD);
+        STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_FAILED_FETCHES_FIELD);
+        STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), OPERATIONS_RECEIVED_FIELD);
+        STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_TRANSFERRED_BYTES);
+        STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_INDEX_TIME_MILLIS_FIELD);
+        STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_SUCCESSFUL_BULK_OPERATIONS_FIELD);
+        STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_FAILED_BULK_OPERATIONS_FIELD);
+        STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_OPERATIONS_INDEXED_FIELD);
+        STATUS_PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), FETCH_EXCEPTIONS_ENTRY_PARSER, FETCH_EXCEPTIONS);
+        STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TIME_SINCE_LAST_FETCH_MILLIS_FIELD);
+    }
+
+    static final ParseField FETCH_EXCEPTIONS_ENTRY_FROM_SEQ_NO = new ParseField("from_seq_no");
+    static final ParseField FETCH_EXCEPTIONS_ENTRY_EXCEPTION = new ParseField("exception");
+
+    static {
+        FETCH_EXCEPTIONS_ENTRY_PARSER.declareLong(ConstructingObjectParser.constructorArg(),
FETCH_EXCEPTIONS_ENTRY_FROM_SEQ_NO); + FETCH_EXCEPTIONS_ENTRY_PARSER.declareObject( + ConstructingObjectParser.constructorArg(), + (p, c) -> ElasticsearchException.fromXContent(p), + FETCH_EXCEPTIONS_ENTRY_EXCEPTION); + } + + private final String leaderIndex; + + public String leaderIndex() { + return leaderIndex; + } + + private final int shardId; + + public int getShardId() { + return shardId; + } + + private final long leaderGlobalCheckpoint; + + public long leaderGlobalCheckpoint() { + return leaderGlobalCheckpoint; + } + + private final long leaderMaxSeqNo; + + public long leaderMaxSeqNo() { + return leaderMaxSeqNo; + } + + private final long followerGlobalCheckpoint; + + public long followerGlobalCheckpoint() { + return followerGlobalCheckpoint; + } + + private final long followerMaxSeqNo; + + public long followerMaxSeqNo() { + return followerMaxSeqNo; + } + + private final long lastRequestedSeqNo; + + public long lastRequestedSeqNo() { + return lastRequestedSeqNo; + } + + private final int numberOfConcurrentReads; + + public int numberOfConcurrentReads() { + return numberOfConcurrentReads; + } + + private final int numberOfConcurrentWrites; + + public int numberOfConcurrentWrites() { + return numberOfConcurrentWrites; + } + + private final int numberOfQueuedWrites; + + public int numberOfQueuedWrites() { + return numberOfQueuedWrites; + } + + private final long mappingVersion; + + public long mappingVersion() { + return mappingVersion; + } + + private final long totalFetchTimeMillis; + + public long totalFetchTimeMillis() { + return totalFetchTimeMillis; + } + + private final long numberOfSuccessfulFetches; + + public long numberOfSuccessfulFetches() { + return numberOfSuccessfulFetches; + } + + private final long numberOfFailedFetches; + + public long numberOfFailedFetches() { + return numberOfFailedFetches; + } + + private final long operationsReceived; + + public long operationsReceived() { + return operationsReceived; + } + + private final long totalTransferredBytes; + + public long totalTransferredBytes() { + return totalTransferredBytes; + } + + private final long totalIndexTimeMillis; + + public long totalIndexTimeMillis() { + return totalIndexTimeMillis; + } + + private final long numberOfSuccessfulBulkOperations; + + public long numberOfSuccessfulBulkOperations() { + return numberOfSuccessfulBulkOperations; + } + + private final long numberOfFailedBulkOperations; + + public long numberOfFailedBulkOperations() { + return numberOfFailedBulkOperations; + } + + private final long numberOfOperationsIndexed; + + public long numberOfOperationsIndexed() { + return numberOfOperationsIndexed; + } + + private final NavigableMap fetchExceptions; + + public NavigableMap fetchExceptions() { + return fetchExceptions; + } + + private final long timeSinceLastFetchMillis; + + public long timeSinceLastFetchMillis() { + return timeSinceLastFetchMillis; + } + + public ShardFollowNodeTaskStatus( + final String leaderIndex, + final int shardId, + final long leaderGlobalCheckpoint, + final long leaderMaxSeqNo, + final long followerGlobalCheckpoint, + final long followerMaxSeqNo, + final long lastRequestedSeqNo, + final int numberOfConcurrentReads, + final int numberOfConcurrentWrites, + final int numberOfQueuedWrites, + final long mappingVersion, + final long totalFetchTimeMillis, + final long numberOfSuccessfulFetches, + final long numberOfFailedFetches, + final long operationsReceived, + final long totalTransferredBytes, + final long totalIndexTimeMillis, + final long 
numberOfSuccessfulBulkOperations, + final long numberOfFailedBulkOperations, + final long numberOfOperationsIndexed, + final NavigableMap fetchExceptions, + final long timeSinceLastFetchMillis) { + this.leaderIndex = leaderIndex; + this.shardId = shardId; + this.leaderGlobalCheckpoint = leaderGlobalCheckpoint; + this.leaderMaxSeqNo = leaderMaxSeqNo; + this.followerGlobalCheckpoint = followerGlobalCheckpoint; + this.followerMaxSeqNo = followerMaxSeqNo; + this.lastRequestedSeqNo = lastRequestedSeqNo; + this.numberOfConcurrentReads = numberOfConcurrentReads; + this.numberOfConcurrentWrites = numberOfConcurrentWrites; + this.numberOfQueuedWrites = numberOfQueuedWrites; + this.mappingVersion = mappingVersion; + this.totalFetchTimeMillis = totalFetchTimeMillis; + this.numberOfSuccessfulFetches = numberOfSuccessfulFetches; + this.numberOfFailedFetches = numberOfFailedFetches; + this.operationsReceived = operationsReceived; + this.totalTransferredBytes = totalTransferredBytes; + this.totalIndexTimeMillis = totalIndexTimeMillis; + this.numberOfSuccessfulBulkOperations = numberOfSuccessfulBulkOperations; + this.numberOfFailedBulkOperations = numberOfFailedBulkOperations; + this.numberOfOperationsIndexed = numberOfOperationsIndexed; + this.fetchExceptions = Objects.requireNonNull(fetchExceptions); + this.timeSinceLastFetchMillis = timeSinceLastFetchMillis; + } + + public ShardFollowNodeTaskStatus(final StreamInput in) throws IOException { + this.leaderIndex = in.readString(); + this.shardId = in.readVInt(); + this.leaderGlobalCheckpoint = in.readZLong(); + this.leaderMaxSeqNo = in.readZLong(); + this.followerGlobalCheckpoint = in.readZLong(); + this.followerMaxSeqNo = in.readZLong(); + this.lastRequestedSeqNo = in.readZLong(); + this.numberOfConcurrentReads = in.readVInt(); + this.numberOfConcurrentWrites = in.readVInt(); + this.numberOfQueuedWrites = in.readVInt(); + this.mappingVersion = in.readVLong(); + this.totalFetchTimeMillis = in.readVLong(); + this.numberOfSuccessfulFetches = in.readVLong(); + this.numberOfFailedFetches = in.readVLong(); + this.operationsReceived = in.readVLong(); + this.totalTransferredBytes = in.readVLong(); + this.totalIndexTimeMillis = in.readVLong(); + this.numberOfSuccessfulBulkOperations = in.readVLong(); + this.numberOfFailedBulkOperations = in.readVLong(); + this.numberOfOperationsIndexed = in.readVLong(); + this.fetchExceptions = new TreeMap<>(in.readMap(StreamInput::readVLong, StreamInput::readException)); + this.timeSinceLastFetchMillis = in.readZLong(); + } + + @Override + public String getWriteableName() { + return STATUS_PARSER_NAME; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeString(leaderIndex); + out.writeVInt(shardId); + out.writeZLong(leaderGlobalCheckpoint); + out.writeZLong(leaderMaxSeqNo); + out.writeZLong(followerGlobalCheckpoint); + out.writeZLong(followerMaxSeqNo); + out.writeZLong(lastRequestedSeqNo); + out.writeVInt(numberOfConcurrentReads); + out.writeVInt(numberOfConcurrentWrites); + out.writeVInt(numberOfQueuedWrites); + out.writeVLong(mappingVersion); + out.writeVLong(totalFetchTimeMillis); + out.writeVLong(numberOfSuccessfulFetches); + out.writeVLong(numberOfFailedFetches); + out.writeVLong(operationsReceived); + out.writeVLong(totalTransferredBytes); + out.writeVLong(totalIndexTimeMillis); + out.writeVLong(numberOfSuccessfulBulkOperations); + out.writeVLong(numberOfFailedBulkOperations); + out.writeVLong(numberOfOperationsIndexed); + out.writeMap(fetchExceptions, StreamOutput::writeVLong, 
StreamOutput::writeException); + out.writeZLong(timeSinceLastFetchMillis); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + { + toXContentFragment(builder, params); + } + builder.endObject(); + return builder; + } + + public XContentBuilder toXContentFragment(final XContentBuilder builder, final Params params) throws IOException { + builder.field(LEADER_INDEX.getPreferredName(), leaderIndex); + builder.field(SHARD_ID.getPreferredName(), shardId); + builder.field(LEADER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), leaderGlobalCheckpoint); + builder.field(LEADER_MAX_SEQ_NO_FIELD.getPreferredName(), leaderMaxSeqNo); + builder.field(FOLLOWER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), followerGlobalCheckpoint); + builder.field(FOLLOWER_MAX_SEQ_NO_FIELD.getPreferredName(), followerMaxSeqNo); + builder.field(LAST_REQUESTED_SEQ_NO_FIELD.getPreferredName(), lastRequestedSeqNo); + builder.field(NUMBER_OF_CONCURRENT_READS_FIELD.getPreferredName(), numberOfConcurrentReads); + builder.field(NUMBER_OF_CONCURRENT_WRITES_FIELD.getPreferredName(), numberOfConcurrentWrites); + builder.field(NUMBER_OF_QUEUED_WRITES_FIELD.getPreferredName(), numberOfQueuedWrites); + builder.field(MAPPING_VERSION_FIELD.getPreferredName(), mappingVersion); + builder.humanReadableField( + TOTAL_FETCH_TIME_MILLIS_FIELD.getPreferredName(), + "total_fetch_time", + new TimeValue(totalFetchTimeMillis, TimeUnit.MILLISECONDS)); + builder.field(NUMBER_OF_SUCCESSFUL_FETCHES_FIELD.getPreferredName(), numberOfSuccessfulFetches); + builder.field(NUMBER_OF_FAILED_FETCHES_FIELD.getPreferredName(), numberOfFailedFetches); + builder.field(OPERATIONS_RECEIVED_FIELD.getPreferredName(), operationsReceived); + builder.humanReadableField( + TOTAL_TRANSFERRED_BYTES.getPreferredName(), + "total_transferred", + new ByteSizeValue(totalTransferredBytes, ByteSizeUnit.BYTES)); + builder.humanReadableField( + TOTAL_INDEX_TIME_MILLIS_FIELD.getPreferredName(), + "total_index_time", + new TimeValue(totalIndexTimeMillis, TimeUnit.MILLISECONDS)); + builder.field(NUMBER_OF_SUCCESSFUL_BULK_OPERATIONS_FIELD.getPreferredName(), numberOfSuccessfulBulkOperations); + builder.field(NUMBER_OF_FAILED_BULK_OPERATIONS_FIELD.getPreferredName(), numberOfFailedBulkOperations); + builder.field(NUMBER_OF_OPERATIONS_INDEXED_FIELD.getPreferredName(), numberOfOperationsIndexed); + builder.startArray(FETCH_EXCEPTIONS.getPreferredName()); + { + for (final Map.Entry entry : fetchExceptions.entrySet()) { + builder.startObject(); + { + builder.field(FETCH_EXCEPTIONS_ENTRY_FROM_SEQ_NO.getPreferredName(), entry.getKey()); + builder.field(FETCH_EXCEPTIONS_ENTRY_EXCEPTION.getPreferredName()); + builder.startObject(); + { + ElasticsearchException.generateThrowableXContent(builder, params, entry.getValue()); + } + builder.endObject(); + } + builder.endObject(); + } + } + builder.endArray(); + builder.humanReadableField( + TIME_SINCE_LAST_FETCH_MILLIS_FIELD.getPreferredName(), + "time_since_last_fetch", + new TimeValue(timeSinceLastFetchMillis, TimeUnit.MILLISECONDS)); + return builder; + } + + public static ShardFollowNodeTaskStatus fromXContent(final XContentParser parser) { + return STATUS_PARSER.apply(parser, null); + } + + @Override + public boolean equals(final Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final ShardFollowNodeTaskStatus that = (ShardFollowNodeTaskStatus) o; + return leaderIndex.equals(that.leaderIndex) && + 
shardId == that.shardId && + leaderGlobalCheckpoint == that.leaderGlobalCheckpoint && + leaderMaxSeqNo == that.leaderMaxSeqNo && + followerGlobalCheckpoint == that.followerGlobalCheckpoint && + followerMaxSeqNo == that.followerMaxSeqNo && + lastRequestedSeqNo == that.lastRequestedSeqNo && + numberOfConcurrentReads == that.numberOfConcurrentReads && + numberOfConcurrentWrites == that.numberOfConcurrentWrites && + numberOfQueuedWrites == that.numberOfQueuedWrites && + mappingVersion == that.mappingVersion && + totalFetchTimeMillis == that.totalFetchTimeMillis && + numberOfSuccessfulFetches == that.numberOfSuccessfulFetches && + numberOfFailedFetches == that.numberOfFailedFetches && + operationsReceived == that.operationsReceived && + totalTransferredBytes == that.totalTransferredBytes && + numberOfSuccessfulBulkOperations == that.numberOfSuccessfulBulkOperations && + numberOfFailedBulkOperations == that.numberOfFailedBulkOperations && + numberOfOperationsIndexed == that.numberOfOperationsIndexed && + /* + * ElasticsearchException does not implement equals so we will assume the fetch exceptions are equal if they are equal + * up to the key set and their messages. Note that we are relying on the fact that the fetch exceptions are ordered by + * keys. + */ + fetchExceptions.keySet().equals(that.fetchExceptions.keySet()) && + getFetchExceptionMessages(this).equals(getFetchExceptionMessages(that)) && + timeSinceLastFetchMillis == that.timeSinceLastFetchMillis; + } + + @Override + public int hashCode() { + return Objects.hash( + leaderIndex, + shardId, + leaderGlobalCheckpoint, + leaderMaxSeqNo, + followerGlobalCheckpoint, + followerMaxSeqNo, + lastRequestedSeqNo, + numberOfConcurrentReads, + numberOfConcurrentWrites, + numberOfQueuedWrites, + mappingVersion, + totalFetchTimeMillis, + numberOfSuccessfulFetches, + numberOfFailedFetches, + operationsReceived, + totalTransferredBytes, + numberOfSuccessfulBulkOperations, + numberOfFailedBulkOperations, + numberOfOperationsIndexed, + /* + * ElasticsearchException does not implement hash code so we will compute the hash code based on the key set and the + * messages. Note that we are relying on the fact that the fetch exceptions are ordered by keys. + */ + fetchExceptions.keySet(), + getFetchExceptionMessages(this), + timeSinceLastFetchMillis); + } + + private static List getFetchExceptionMessages(final ShardFollowNodeTaskStatus status) { + return status.fetchExceptions().values().stream().map(ElasticsearchException::getMessage).collect(Collectors.toList()); + } + + public String toString() { + return Strings.toString(this); + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CcrStatsAction.java similarity index 72% rename from x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrStatsAction.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CcrStatsAction.java index b5d6697fc73c2..1074b6905d33e 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CcrStatsAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
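The 22-argument STATUS_PARSER above follows the standard positional ConstructingObjectParser pattern: declare each constructor argument in field order, then materialize the object from args[]. For readers new to the pattern, here is a self-contained miniature of the same idiom; Point is a made-up type for illustration and is not part of this PR:

    import org.elasticsearch.common.ParseField;
    import org.elasticsearch.common.xcontent.ConstructingObjectParser;
    import org.elasticsearch.common.xcontent.XContentParser;

    final class Point {
        // Arguments bind positionally: the first declare* call fills args[0], the second args[1].
        static final ConstructingObjectParser<Point, Void> PARSER = new ConstructingObjectParser<>(
                "point", args -> new Point((long) args[0], (long) args[1]));
        static {
            PARSER.declareLong(ConstructingObjectParser.constructorArg(), new ParseField("x"));
            PARSER.declareLong(ConstructingObjectParser.constructorArg(), new ParseField("y"));
        }
        final long x;
        final long y;
        Point(long x, long y) { this.x = x; this.y = y; }
        static Point fromXContent(XContentParser parser) { return PARSER.apply(parser, null); }
    }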
*/ -package org.elasticsearch.xpack.ccr.action; +package org.elasticsearch.xpack.core.ccr.action; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; @@ -21,6 +21,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.tasks.Task; +import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; import java.io.IOException; import java.util.Collections; @@ -28,7 +29,7 @@ import java.util.Map; import java.util.TreeMap; -public class CcrStatsAction extends Action { +public class CcrStatsAction extends Action { public static final String NAME = "cluster:monitor/ccr/stats"; @@ -39,41 +40,45 @@ private CcrStatsAction() { } @Override - public TasksResponse newResponse() { - return new TasksResponse(); + public StatsResponses newResponse() { + return new StatsResponses(); } - public static class TasksResponse extends BaseTasksResponse implements ToXContentObject { + public static class StatsResponses extends BaseTasksResponse implements ToXContentObject { - private final List taskResponses; + private final List statsResponse; - public TasksResponse() { + public List getStatsResponses() { + return statsResponse; + } + + public StatsResponses() { this(Collections.emptyList(), Collections.emptyList(), Collections.emptyList()); } - TasksResponse( + public StatsResponses( final List taskFailures, final List nodeFailures, - final List taskResponses) { + final List statsResponse) { super(taskFailures, nodeFailures); - this.taskResponses = taskResponses; + this.statsResponse = statsResponse; } @Override public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { // sort by index name, then shard ID - final Map> taskResponsesByIndex = new TreeMap<>(); - for (final TaskResponse taskResponse : taskResponses) { + final Map> taskResponsesByIndex = new TreeMap<>(); + for (final StatsResponse statsResponse : statsResponse) { taskResponsesByIndex.computeIfAbsent( - taskResponse.followerShardId().getIndexName(), - k -> new TreeMap<>()).put(taskResponse.followerShardId().getId(), taskResponse); + statsResponse.followerShardId().getIndexName(), + k -> new TreeMap<>()).put(statsResponse.followerShardId().getId(), statsResponse); } builder.startObject(); { - for (final Map.Entry> index : taskResponsesByIndex.entrySet()) { + for (final Map.Entry> index : taskResponsesByIndex.entrySet()) { builder.startArray(index.getKey()); { - for (final Map.Entry shard : index.getValue().entrySet()) { + for (final Map.Entry shard : index.getValue().entrySet()) { shard.getValue().status().toXContent(builder, params); } } @@ -85,7 +90,7 @@ public XContentBuilder toXContent(final XContentBuilder builder, final Params pa } } - public static class TasksRequest extends BaseTasksRequest implements IndicesRequest { + public static class StatsRequest extends BaseTasksRequest implements IndicesRequest { private String[] indices; @@ -143,28 +148,28 @@ public void writeTo(StreamOutput out) throws IOException { } - public static class TaskResponse implements Writeable { + public static class StatsResponse implements Writeable { private final ShardId followerShardId; - ShardId followerShardId() { + public ShardId followerShardId() { return followerShardId; } - private final ShardFollowNodeTask.Status status; + private final ShardFollowNodeTaskStatus status; - ShardFollowNodeTask.Status status() { + public ShardFollowNodeTaskStatus status() { return status; } - 
TaskResponse(final ShardId followerShardId, final ShardFollowNodeTask.Status status) { + public StatsResponse(final ShardId followerShardId, final ShardFollowNodeTaskStatus status) { this.followerShardId = followerShardId; this.status = status; } - TaskResponse(final StreamInput in) throws IOException { + public StatsResponse(final StreamInput in) throws IOException { this.followerShardId = ShardId.readShardId(in); - this.status = new ShardFollowNodeTask.Status(in); + this.status = new ShardFollowNodeTaskStatus(in); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CreateAndFollowIndexAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CreateAndFollowIndexAction.java new file mode 100644 index 0000000000000..ea63815c2b933 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CreateAndFollowIndexAction.java @@ -0,0 +1,167 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.ccr.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +public final class CreateAndFollowIndexAction extends Action<CreateAndFollowIndexAction.Response> { + + public static final CreateAndFollowIndexAction INSTANCE = new CreateAndFollowIndexAction(); + public static final String NAME = "indices:admin/xpack/ccr/create_and_follow_index"; + + private CreateAndFollowIndexAction() { + super(NAME); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends AcknowledgedRequest<Request> implements IndicesRequest { + + private FollowIndexAction.Request followRequest; + + public Request(FollowIndexAction.Request followRequest) { + this.followRequest = Objects.requireNonNull(followRequest); + } + + public Request() { + + } + + public FollowIndexAction.Request getFollowRequest() { + return followRequest; + } + + @Override + public ActionRequestValidationException validate() { + return followRequest.validate(); + } + + @Override + public String[] indices() { + return new String[]{followRequest.getFollowerIndex()}; + } + + @Override + public IndicesOptions indicesOptions() { + return IndicesOptions.strictSingleIndexNoExpandForbidClosed(); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + followRequest = new FollowIndexAction.Request(); + followRequest.readFrom(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + followRequest.writeTo(out); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return Objects.equals(followRequest, request.followRequest); + } + 
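// Illustrative note (not part of this change): Request is a thin wrapper that delegates
// validation and index resolution to the inner follow request, so a caller builds it from a
// FollowIndexAction.Request; the null tuning parameters below fall back to the defaults
// defined on FollowIndexAction (the index names are examples):
//
//     FollowIndexAction.Request follow = new FollowIndexAction.Request(
//             "leader-index", "follower-index", null, null, null, null, null, null, null);
//     CreateAndFollowIndexAction.Request request = new CreateAndFollowIndexAction.Request(follow);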
@Override + public int hashCode() { + return Objects.hash(followRequest); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private boolean followIndexCreated; + private boolean followIndexShardsAcked; + private boolean indexFollowingStarted; + + public Response() { + + } + + public Response(boolean followIndexCreated, boolean followIndexShardsAcked, boolean indexFollowingStarted) { + this.followIndexCreated = followIndexCreated; + this.followIndexShardsAcked = followIndexShardsAcked; + this.indexFollowingStarted = indexFollowingStarted; + } + + public boolean isFollowIndexCreated() { + return followIndexCreated; + } + + public boolean isFollowIndexShardsAcked() { + return followIndexShardsAcked; + } + + public boolean isIndexFollowingStarted() { + return indexFollowingStarted; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + followIndexCreated = in.readBoolean(); + followIndexShardsAcked = in.readBoolean(); + indexFollowingStarted = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(followIndexCreated); + out.writeBoolean(followIndexShardsAcked); + out.writeBoolean(indexFollowingStarted); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field("follow_index_created", followIndexCreated); + builder.field("follow_index_shards_acked", followIndexShardsAcked); + builder.field("index_following_started", indexFollowingStarted); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return followIndexCreated == response.followIndexCreated && + followIndexShardsAcked == response.followIndexShardsAcked && + indexFollowingStarted == response.indexFollowingStarted; + } + + @Override + public int hashCode() { + return Objects.hash(followIndexCreated, followIndexShardsAcked, indexFollowingStarted); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowIndexAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowIndexAction.java new file mode 100644 index 0000000000000..2c311356d4943 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowIndexAction.java @@ -0,0 +1,306 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.ccr.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public final class FollowIndexAction extends Action<AcknowledgedResponse> { + + public static final FollowIndexAction INSTANCE = new FollowIndexAction(); + public static final String NAME = "cluster:admin/xpack/ccr/follow_index"; + + public static final int DEFAULT_MAX_WRITE_BUFFER_SIZE = 10240; + public static final int DEFAULT_MAX_BATCH_OPERATION_COUNT = 1024; + public static final int DEFAULT_MAX_CONCURRENT_READ_BATCHES = 1; + public static final int DEFAULT_MAX_CONCURRENT_WRITE_BATCHES = 1; + public static final long DEFAULT_MAX_BATCH_SIZE_IN_BYTES = Long.MAX_VALUE; + public static final TimeValue DEFAULT_RETRY_TIMEOUT = new TimeValue(500); + public static final TimeValue DEFAULT_IDLE_SHARD_RETRY_DELAY = TimeValue.timeValueSeconds(10); + + private FollowIndexAction() { + super(NAME); + } + + @Override + public AcknowledgedResponse newResponse() { + return new AcknowledgedResponse(); + } + + public static class Request extends ActionRequest implements ToXContentObject { + + private static final ParseField LEADER_INDEX_FIELD = new ParseField("leader_index"); + private static final ParseField FOLLOWER_INDEX_FIELD = new ParseField("follower_index"); + private static final ParseField MAX_BATCH_OPERATION_COUNT = new ParseField("max_batch_operation_count"); + private static final ParseField MAX_CONCURRENT_READ_BATCHES = new ParseField("max_concurrent_read_batches"); + private static final ParseField MAX_BATCH_SIZE_IN_BYTES = new ParseField("max_batch_size_in_bytes"); + private static final ParseField MAX_CONCURRENT_WRITE_BATCHES = new ParseField("max_concurrent_write_batches"); + private static final ParseField MAX_WRITE_BUFFER_SIZE = new ParseField("max_write_buffer_size"); + private static final ParseField MAX_RETRY_DELAY = new ParseField("max_retry_delay"); + private static final ParseField IDLE_SHARD_RETRY_DELAY = new ParseField("idle_shard_retry_delay"); + private static final ConstructingObjectParser<Request, String> PARSER = new ConstructingObjectParser<>(NAME, true, + (args, followerIndex) -> { if (args[1] != null) { followerIndex = (String) args[1]; } return new Request((String) args[0], followerIndex, (Integer) args[2], (Integer) args[3], (Long) args[4], + (Integer) args[5], (Integer) args[6], (TimeValue) args[7], (TimeValue) args[8]); }); + + static { + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), LEADER_INDEX_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), FOLLOWER_INDEX_FIELD); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_BATCH_OPERATION_COUNT); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_CONCURRENT_READ_BATCHES); +
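// Illustrative note (not part of this change): the parser binds body fields positionally to the
// Request constructor; args[1] is follower_index, which may instead be supplied from the URL path
// through fromXContent below (the path value is used when the body omits it, and a mismatching
// body value is rejected). For example, with a body of {"leader_index": "leader-index"}:
//
//     FollowIndexAction.Request request = FollowIndexAction.Request.fromXContent(parser, "follower-index");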
PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), MAX_BATCH_SIZE_IN_BYTES); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_CONCURRENT_WRITE_BATCHES); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_WRITE_BUFFER_SIZE); + PARSER.declareField( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> TimeValue.parseTimeValue(p.text(), MAX_RETRY_DELAY.getPreferredName()), + MAX_RETRY_DELAY, + ObjectParser.ValueType.STRING); + PARSER.declareField( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> TimeValue.parseTimeValue(p.text(), IDLE_SHARD_RETRY_DELAY.getPreferredName()), + IDLE_SHARD_RETRY_DELAY, + ObjectParser.ValueType.STRING); + } + + public static Request fromXContent(final XContentParser parser, final String followerIndex) throws IOException { + Request request = PARSER.parse(parser, followerIndex); + if (followerIndex != null) { + if (request.followerIndex == null) { + request.followerIndex = followerIndex; + } else { + if (request.followerIndex.equals(followerIndex) == false) { + throw new IllegalArgumentException("provided follower_index is not equal"); + } + } + } + return request; + } + + private String leaderIndex; + + public String getLeaderIndex() { + return leaderIndex; + } + + + private String followerIndex; + + public String getFollowerIndex() { + return followerIndex; + } + + private int maxBatchOperationCount; + + public int getMaxBatchOperationCount() { + return maxBatchOperationCount; + } + + private int maxConcurrentReadBatches; + + public int getMaxConcurrentReadBatches() { + return maxConcurrentReadBatches; + } + + private long maxOperationSizeInBytes; + + public long getMaxOperationSizeInBytes() { + return maxOperationSizeInBytes; + } + + private int maxConcurrentWriteBatches; + + public int getMaxConcurrentWriteBatches() { + return maxConcurrentWriteBatches; + } + + private int maxWriteBufferSize; + + public int getMaxWriteBufferSize() { + return maxWriteBufferSize; + } + + private TimeValue maxRetryDelay; + + public TimeValue getMaxRetryDelay() { + return maxRetryDelay; + } + + private TimeValue idleShardRetryDelay; + + public TimeValue getIdleShardRetryDelay() { + return idleShardRetryDelay; + } + + public Request( + final String leaderIndex, + final String followerIndex, + final Integer maxBatchOperationCount, + final Integer maxConcurrentReadBatches, + final Long maxOperationSizeInBytes, + final Integer maxConcurrentWriteBatches, + final Integer maxWriteBufferSize, + final TimeValue maxRetryDelay, + final TimeValue idleShardRetryDelay) { + + if (leaderIndex == null) { + throw new IllegalArgumentException(LEADER_INDEX_FIELD.getPreferredName() + " is missing"); + } + + if (followerIndex == null) { + throw new IllegalArgumentException(FOLLOWER_INDEX_FIELD.getPreferredName() + " is missing"); + } + + final int actualMaxBatchOperationCount = + maxBatchOperationCount == null ? DEFAULT_MAX_BATCH_OPERATION_COUNT : maxBatchOperationCount; + if (actualMaxBatchOperationCount < 1) { + throw new IllegalArgumentException(MAX_BATCH_OPERATION_COUNT.getPreferredName() + " must be larger than 0"); + } + + final int actualMaxConcurrentReadBatches = + maxConcurrentReadBatches == null ? DEFAULT_MAX_CONCURRENT_READ_BATCHES : maxConcurrentReadBatches; + if (actualMaxConcurrentReadBatches < 1) { + throw new IllegalArgumentException(MAX_CONCURRENT_READ_BATCHES.getPreferredName() + " must be larger than 0"); + } + + final long actualMaxOperationSizeInBytes = + maxOperationSizeInBytes == null ? 
DEFAULT_MAX_BATCH_SIZE_IN_BYTES : maxOperationSizeInBytes; + if (actualMaxOperationSizeInBytes <= 0) { + throw new IllegalArgumentException(MAX_BATCH_SIZE_IN_BYTES.getPreferredName() + " must be larger than 0"); + } + + final int actualMaxConcurrentWriteBatches = + maxConcurrentWriteBatches == null ? DEFAULT_MAX_CONCURRENT_WRITE_BATCHES : maxConcurrentWriteBatches; + if (actualMaxConcurrentWriteBatches < 1) { + throw new IllegalArgumentException(MAX_CONCURRENT_WRITE_BATCHES.getPreferredName() + " must be larger than 0"); + } + + final int actualMaxWriteBufferSize = maxWriteBufferSize == null ? DEFAULT_MAX_WRITE_BUFFER_SIZE : maxWriteBufferSize; + if (actualMaxWriteBufferSize < 1) { + throw new IllegalArgumentException(MAX_WRITE_BUFFER_SIZE.getPreferredName() + " must be larger than 0"); + } + + final TimeValue actualRetryTimeout = maxRetryDelay == null ? DEFAULT_RETRY_TIMEOUT : maxRetryDelay; + final TimeValue actualIdleShardRetryDelay = idleShardRetryDelay == null ? DEFAULT_IDLE_SHARD_RETRY_DELAY : idleShardRetryDelay; + + this.leaderIndex = leaderIndex; + this.followerIndex = followerIndex; + this.maxBatchOperationCount = actualMaxBatchOperationCount; + this.maxConcurrentReadBatches = actualMaxConcurrentReadBatches; + this.maxOperationSizeInBytes = actualMaxOperationSizeInBytes; + this.maxConcurrentWriteBatches = actualMaxConcurrentWriteBatches; + this.maxWriteBufferSize = actualMaxWriteBufferSize; + this.maxRetryDelay = actualRetryTimeout; + this.idleShardRetryDelay = actualIdleShardRetryDelay; + } + + public Request() { + + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(final StreamInput in) throws IOException { + super.readFrom(in); + leaderIndex = in.readString(); + followerIndex = in.readString(); + maxBatchOperationCount = in.readVInt(); + maxConcurrentReadBatches = in.readVInt(); + maxOperationSizeInBytes = in.readVLong(); + maxConcurrentWriteBatches = in.readVInt(); + maxWriteBufferSize = in.readVInt(); + maxRetryDelay = in.readOptionalTimeValue(); + idleShardRetryDelay = in.readOptionalTimeValue(); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(leaderIndex); + out.writeString(followerIndex); + out.writeVInt(maxBatchOperationCount); + out.writeVInt(maxConcurrentReadBatches); + out.writeVLong(maxOperationSizeInBytes); + out.writeVInt(maxConcurrentWriteBatches); + out.writeVInt(maxWriteBufferSize); + out.writeOptionalTimeValue(maxRetryDelay); + out.writeOptionalTimeValue(idleShardRetryDelay); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + { + builder.field(LEADER_INDEX_FIELD.getPreferredName(), leaderIndex); + builder.field(FOLLOWER_INDEX_FIELD.getPreferredName(), followerIndex); + builder.field(MAX_BATCH_OPERATION_COUNT.getPreferredName(), maxBatchOperationCount); + builder.field(MAX_BATCH_SIZE_IN_BYTES.getPreferredName(), maxOperationSizeInBytes); + builder.field(MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize); + builder.field(MAX_CONCURRENT_READ_BATCHES.getPreferredName(), maxConcurrentReadBatches); + builder.field(MAX_CONCURRENT_WRITE_BATCHES.getPreferredName(), maxConcurrentWriteBatches); + builder.field(MAX_RETRY_DELAY.getPreferredName(), maxRetryDelay.getStringRep()); + builder.field(IDLE_SHARD_RETRY_DELAY.getPreferredName(), idleShardRetryDelay.getStringRep()); + } + builder.endObject(); + return 
builder; } + + @Override + public boolean equals(final Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return maxBatchOperationCount == request.maxBatchOperationCount && + maxConcurrentReadBatches == request.maxConcurrentReadBatches && + maxOperationSizeInBytes == request.maxOperationSizeInBytes && + maxConcurrentWriteBatches == request.maxConcurrentWriteBatches && + maxWriteBufferSize == request.maxWriteBufferSize && + Objects.equals(maxRetryDelay, request.maxRetryDelay) && + Objects.equals(idleShardRetryDelay, request.idleShardRetryDelay) && + Objects.equals(leaderIndex, request.leaderIndex) && + Objects.equals(followerIndex, request.followerIndex); + } + + @Override + public int hashCode() { + return Objects.hash( + leaderIndex, + followerIndex, + maxBatchOperationCount, + maxConcurrentReadBatches, + maxOperationSizeInBytes, + maxConcurrentWriteBatches, + maxWriteBufferSize, + maxRetryDelay, + idleShardRetryDelay + ); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/UnfollowIndexAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/UnfollowIndexAction.java new file mode 100644 index 0000000000000..65ecd3dad2f8f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/UnfollowIndexAction.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.ccr.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +public class UnfollowIndexAction extends Action<AcknowledgedResponse> { + + public static final UnfollowIndexAction INSTANCE = new UnfollowIndexAction(); + public static final String NAME = "cluster:admin/xpack/ccr/unfollow_index"; + + private UnfollowIndexAction() { + super(NAME); + } + + @Override + public AcknowledgedResponse newResponse() { + return new AcknowledgedResponse(); + } + + public static class Request extends ActionRequest { + + private String followIndex; + + public String getFollowIndex() { + return followIndex; + } + + public void setFollowIndex(final String followIndex) { + this.followIndex = followIndex; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(final StreamInput in) throws IOException { + super.readFrom(in); + followIndex = in.readString(); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(followIndex); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/client/CcrClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/client/CcrClient.java new file mode 100644 index 0000000000000..881979e3d7972 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/client/CcrClient.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V.
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.ccr.client; + +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; +import org.elasticsearch.xpack.core.ccr.action.CreateAndFollowIndexAction; +import org.elasticsearch.xpack.core.ccr.action.FollowIndexAction; +import org.elasticsearch.xpack.core.ccr.action.UnfollowIndexAction; + +import java.util.Objects; + +public class CcrClient { + + private final ElasticsearchClient client; + + public CcrClient(final ElasticsearchClient client) { + this.client = Objects.requireNonNull(client, "client"); + } + + public void createAndFollow( + final CreateAndFollowIndexAction.Request request, + final ActionListener<CreateAndFollowIndexAction.Response> listener) { + client.execute(CreateAndFollowIndexAction.INSTANCE, request, listener); + } + + public ActionFuture<CreateAndFollowIndexAction.Response> createAndFollow(final CreateAndFollowIndexAction.Request request) { + final PlainActionFuture<CreateAndFollowIndexAction.Response> listener = PlainActionFuture.newFuture(); + client.execute(CreateAndFollowIndexAction.INSTANCE, request, listener); + return listener; + } + + public void follow(final FollowIndexAction.Request request, final ActionListener<AcknowledgedResponse> listener) { + client.execute(FollowIndexAction.INSTANCE, request, listener); + } + + public ActionFuture<AcknowledgedResponse> follow(final FollowIndexAction.Request request) { + final PlainActionFuture<AcknowledgedResponse> listener = PlainActionFuture.newFuture(); + client.execute(FollowIndexAction.INSTANCE, request, listener); + return listener; + } + + public void stats( + final CcrStatsAction.StatsRequest request, + final ActionListener<CcrStatsAction.StatsResponses> listener) { + client.execute(CcrStatsAction.INSTANCE, request, listener); + } + + public ActionFuture<CcrStatsAction.StatsResponses> stats(final CcrStatsAction.StatsRequest request) { + final PlainActionFuture<CcrStatsAction.StatsResponses> listener = PlainActionFuture.newFuture(); + client.execute(CcrStatsAction.INSTANCE, request, listener); + return listener; + } + + public void unfollow(final UnfollowIndexAction.Request request, final ActionListener<AcknowledgedResponse> listener) { + client.execute(UnfollowIndexAction.INSTANCE, request, listener); + } + + public ActionFuture<AcknowledgedResponse> unfollow(final UnfollowIndexAction.Request request) { + final PlainActionFuture<AcknowledgedResponse> listener = PlainActionFuture.newFuture(); + client.execute(UnfollowIndexAction.INSTANCE, request, listener); + return listener; + } + +}
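// Illustrative usage of the client above (not part of this change; index names are examples and
// error handling is omitted). The XPackClient wiring mirrors its use by CcrStatsCollector later
// in this diff:
//
//     CcrClient ccr = new XPackClient(client).ccr();
//     FollowIndexAction.Request follow = new FollowIndexAction.Request(
//             "leader-index", "follower-index", null, null, null, null, null, null, null);
//     AcknowledgedResponse acknowledged = ccr.follow(follow).actionGet();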
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructure.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructure.java index 5484f9f9902f4..dd508dfb36b74 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructure.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructure.java @@ -92,7 +92,7 @@ public String toString() { static final ParseField STRUCTURE = new ParseField("format"); static final ParseField MULTILINE_START_PATTERN = new ParseField("multiline_start_pattern"); static final ParseField EXCLUDE_LINES_PATTERN = new ParseField("exclude_lines_pattern"); - static final ParseField INPUT_FIELDS = new ParseField("input_fields"); + static final ParseField COLUMN_NAMES = new ParseField("column_names"); static final ParseField HAS_HEADER_ROW = new ParseField("has_header_row"); static final ParseField DELIMITER = new ParseField("delimiter"); static final ParseField SHOULD_TRIM_FIELDS = new ParseField("should_trim_fields"); @@ -115,7 +115,7 @@ public String toString() { PARSER.declareString((p, c) -> p.setFormat(Format.fromString(c)), STRUCTURE); PARSER.declareString(Builder::setMultilineStartPattern, MULTILINE_START_PATTERN); PARSER.declareString(Builder::setExcludeLinesPattern, EXCLUDE_LINES_PATTERN); - PARSER.declareStringArray(Builder::setInputFields, INPUT_FIELDS); + PARSER.declareStringArray(Builder::setColumnNames, COLUMN_NAMES); PARSER.declareBoolean(Builder::setHasHeaderRow, HAS_HEADER_ROW); PARSER.declareString((p, c) -> p.setDelimiter(c.charAt(0)), DELIMITER); PARSER.declareBoolean(Builder::setShouldTrimFields, SHOULD_TRIM_FIELDS); @@ -142,7 +142,7 @@ public String toString() { private final Format format; private final String multilineStartPattern; private final String excludeLinesPattern; - private final List<String> inputFields; + private final List<String> columnNames; private final Boolean hasHeaderRow; private final Character delimiter; private final Boolean shouldTrimFields; @@ -155,7 +155,7 @@ public String toString() { private final List<String> explanation; public FileStructure(int numLinesAnalyzed, int numMessagesAnalyzed, String sampleStart, String charset, Boolean hasByteOrderMarker, - Format format, String multilineStartPattern, String excludeLinesPattern, List<String> inputFields, + Format format, String multilineStartPattern, String excludeLinesPattern, List<String> columnNames, Boolean hasHeaderRow, Character delimiter, Boolean shouldTrimFields, String grokPattern, String timestampField, List<String> timestampFormats, boolean needClientTimezone, Map<String, Object> mappings, Map<String, FieldStats> fieldStats, List<String> explanation) { @@ -168,7 +168,7 @@ public FileStructure(int numLinesAnalyzed, int numMessagesAnalyzed, String sampl this.format = Objects.requireNonNull(format); this.multilineStartPattern = multilineStartPattern; this.excludeLinesPattern = excludeLinesPattern; - this.inputFields = (inputFields == null) ? null : Collections.unmodifiableList(new ArrayList<>(inputFields)); + this.columnNames = (columnNames == null) ? null : Collections.unmodifiableList(new ArrayList<>(columnNames)); this.hasHeaderRow = hasHeaderRow; this.delimiter = delimiter; this.shouldTrimFields = shouldTrimFields; @@ -190,7 +190,7 @@ public FileStructure(StreamInput in) throws IOException { format = in.readEnum(Format.class); multilineStartPattern = in.readOptionalString(); excludeLinesPattern = in.readOptionalString(); - inputFields = in.readBoolean() ? Collections.unmodifiableList(in.readList(StreamInput::readString)) : null; + columnNames = in.readBoolean() ? Collections.unmodifiableList(in.readList(StreamInput::readString)) : null; hasHeaderRow = in.readOptionalBoolean(); delimiter = in.readBoolean() ?
(char) in.readVInt() : null; shouldTrimFields = in.readOptionalBoolean(); @@ -213,11 +213,11 @@ public void writeTo(StreamOutput out) throws IOException { out.writeEnum(format); out.writeOptionalString(multilineStartPattern); out.writeOptionalString(excludeLinesPattern); - if (inputFields == null) { + if (columnNames == null) { out.writeBoolean(false); } else { out.writeBoolean(true); - out.writeCollection(inputFields, StreamOutput::writeString); + out.writeCollection(columnNames, StreamOutput::writeString); } out.writeOptionalBoolean(hasHeaderRow); if (delimiter == null) { @@ -273,8 +273,8 @@ public String getExcludeLinesPattern() { return excludeLinesPattern; } - public List getInputFields() { - return inputFields; + public List getColumnNames() { + return columnNames; } public Boolean getHasHeaderRow() { @@ -335,8 +335,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (excludeLinesPattern != null && excludeLinesPattern.isEmpty() == false) { builder.field(EXCLUDE_LINES_PATTERN.getPreferredName(), excludeLinesPattern); } - if (inputFields != null && inputFields.isEmpty() == false) { - builder.field(INPUT_FIELDS.getPreferredName(), inputFields); + if (columnNames != null && columnNames.isEmpty() == false) { + builder.field(COLUMN_NAMES.getPreferredName(), columnNames); } if (hasHeaderRow != null) { builder.field(HAS_HEADER_ROW.getPreferredName(), hasHeaderRow.booleanValue()); @@ -377,7 +377,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public int hashCode() { return Objects.hash(numLinesAnalyzed, numMessagesAnalyzed, sampleStart, charset, hasByteOrderMarker, format, - multilineStartPattern, excludeLinesPattern, inputFields, hasHeaderRow, delimiter, shouldTrimFields, grokPattern, timestampField, + multilineStartPattern, excludeLinesPattern, columnNames, hasHeaderRow, delimiter, shouldTrimFields, grokPattern, timestampField, timestampFormats, needClientTimezone, mappings, fieldStats, explanation); } @@ -402,7 +402,7 @@ public boolean equals(Object other) { Objects.equals(this.format, that.format) && Objects.equals(this.multilineStartPattern, that.multilineStartPattern) && Objects.equals(this.excludeLinesPattern, that.excludeLinesPattern) && - Objects.equals(this.inputFields, that.inputFields) && + Objects.equals(this.columnNames, that.columnNames) && Objects.equals(this.hasHeaderRow, that.hasHeaderRow) && Objects.equals(this.delimiter, that.delimiter) && Objects.equals(this.shouldTrimFields, that.shouldTrimFields) && @@ -424,7 +424,7 @@ public static class Builder { private Format format; private String multilineStartPattern; private String excludeLinesPattern; - private List inputFields; + private List columnNames; private Boolean hasHeaderRow; private Character delimiter; private Boolean shouldTrimFields; @@ -484,8 +484,8 @@ public Builder setExcludeLinesPattern(String excludeLinesPattern) { return this; } - public Builder setInputFields(List inputFields) { - this.inputFields = inputFields; + public Builder setColumnNames(List columnNames) { + this.columnNames = columnNames; return this; } @@ -573,6 +573,9 @@ public FileStructure build() { } // $FALL-THROUGH$ case XML: + if (columnNames != null) { + throw new IllegalArgumentException("Column names may not be specified for [" + format + "] structures."); + } if (hasHeaderRow != null) { throw new IllegalArgumentException("Has header row may not be specified for [" + format + "] structures."); } @@ -584,8 +587,8 @@ public FileStructure build() { } break; case 
DELIMITED: - if (inputFields == null || inputFields.isEmpty()) { - throw new IllegalArgumentException("Input fields must be specified for [" + format + "] structures."); + if (columnNames == null || columnNames.isEmpty()) { + throw new IllegalArgumentException("Column names must be specified for [" + format + "] structures."); } if (hasHeaderRow == null) { throw new IllegalArgumentException("Has header row must be specified for [" + format + "] structures."); @@ -598,8 +601,8 @@ public FileStructure build() { } break; case SEMI_STRUCTURED_TEXT: - if (inputFields != null) { - throw new IllegalArgumentException("Input fields may not be specified for [" + format + "] structures."); + if (columnNames != null) { + throw new IllegalArgumentException("Column names may not be specified for [" + format + "] structures."); } if (hasHeaderRow != null) { throw new IllegalArgumentException("Has header row may not be specified for [" + format + "] structures."); @@ -635,7 +638,7 @@ public FileStructure build() { } return new FileStructure(numLinesAnalyzed, numMessagesAnalyzed, sampleStart, charset, hasByteOrderMarker, format, - multilineStartPattern, excludeLinesPattern, inputFields, hasHeaderRow, delimiter, shouldTrimFields, grokPattern, + multilineStartPattern, excludeLinesPattern, columnNames, hasHeaderRow, delimiter, shouldTrimFields, grokPattern, timestampField, timestampFormats, needClientTimezone, mappings, fieldStats, explanation); } } diff --git a/x-pack/plugin/core/src/main/resources/monitoring-beats.json b/x-pack/plugin/core/src/main/resources/monitoring-beats.json index 07756ba2602f0..d23db9a11a4aa 100644 --- a/x-pack/plugin/core/src/main/resources/monitoring-beats.json +++ b/x-pack/plugin/core/src/main/resources/monitoring-beats.json @@ -224,6 +224,274 @@ } } }, + "apm-server": { + "properties": { + "server": { + "properties": { + "request": { + "properties": { + "count": { + "type": "long" + } + } + }, + "concurrent": { + "properties": { + "wait": { + "properties": { + "ms": { + "type": "long" + } + } + } + } + }, + "response": { + "properties": { + "count": { + "type": "long" + }, + "errors": { + "properties": { + "count": { + "type": "long" + }, + "toolarge": { + "type": "long" + }, + "validate": { + "type": "long" + }, + "ratelimit": { + "type": "long" + }, + "queue": { + "type": "long" + }, + "closed": { + "type": "long" + }, + "forbidden": { + "type": "long" + }, + "concurrency": { + "type": "long" + }, + "unauthorized": { + "type": "long" + }, + "decode": { + "type": "long" + }, + "method": { + "type": "long" + } + } + }, + "valid": { + "properties": { + "ok": { + "type": "long" + }, + "accepted": { + "type": "long" + }, + "count": { + "type": "long" + } + } + } + } + } + } + }, + "decoder": { + "properties": { + "deflate": { + "properties": { + "content-length": { + "type": "long" + }, + "count": { + "type": "long" + } + } + }, + "gzip": { + "properties": { + "content-length": { + "type": "long" + }, + "count": { + "type": "long" + } + } + }, + "uncompressed": { + "properties": { + "content-length": { + "type": "long" + }, + "count": { + "type": "long" + } + } + }, + "reader": { + "properties": { + "size": { + "type": "long" + }, + "count": { + "type": "long" + } + } + }, + "missing-content-length": { + "properties": { + "count": { + "type": "long" + } + } + } + } + + }, + "processor": { + "properties": { + "metric": { + "properties": { + "decoding": { + "properties": { + "errors": { + "type": "long" + }, + "count": { + "type": "long" + } + } + }, + "validation": { + "properties": { 
+ "errors": { + "type": "long" + }, + "count": { + "type": "long" + } + } + }, + "transformations": { + "type": "long" + } + } + }, + "sourcemap": { + "properties": { + "counter": { + "type": "long" + }, + "decoding": { + "properties": { + "errors": { + "type": "long" + }, + "count": { + "type": "long" + } + } + }, + "validation": { + "properties": { + "errors": { + "type": "long" + }, + "count": { + "type": "long" + } + } + } + } + }, + "transaction": { + "properties": { + "decoding": { + "properties": { + "errors": { + "type": "long" + }, + "count": { + "type": "long" + } + } + }, + "validation": { + "properties": { + "errors": { + "type": "long" + }, + "count": { + "type": "long" + } + } + }, + "transformations": { + "type": "long" + }, + "transactions": { + "type": "long" + }, + "spans": { + "type": "long" + }, + "stacktraces": { + "type": "long" + }, + "frames": { + "type": "long" + } + } + }, + "error": { + "properties": { + "decoding": { + "properties": { + "errors": { + "type": "long" + }, + "count": { + "type": "long" + } + } + }, + "validation": { + "properties": { + "errors": { + "type": "long" + }, + "count": { + "type": "long" + } + } + }, + "transformations": { + "type": "long" + }, + "errors": { + "type": "long" + }, + "stacktraces": { + "type": "long" + }, + "frames": { + "type": "long" + } + } + } + } + } + } + }, "libbeat": { "properties": { "config": { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructureTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructureTests.java index 6dcf675196508..e09b9e3f91e7a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructureTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/filestructurefinder/FileStructureTests.java @@ -50,18 +50,17 @@ public static FileStructure createTestFileStructure() { builder.setExcludeLinesPattern(randomAlphaOfLength(100)); } - if (format == FileStructure.Format.DELIMITED || (format.supportsNesting() && randomBoolean())) { - builder.setInputFields(Arrays.asList(generateRandomStringArray(10, 10, false, false))); - } if (format == FileStructure.Format.DELIMITED) { + builder.setColumnNames(Arrays.asList(generateRandomStringArray(10, 10, false, false))); builder.setHasHeaderRow(randomBoolean()); builder.setDelimiter(randomFrom(',', '\t', ';', '|')); } - if (format.isSemiStructured()) { + + if (format == FileStructure.Format.SEMI_STRUCTURED_TEXT) { builder.setGrokPattern(randomAlphaOfLength(100)); } - if (format.isSemiStructured() || randomBoolean()) { + if (format == FileStructure.Format.SEMI_STRUCTURED_TEXT || randomBoolean()) { builder.setTimestampField(randomAlphaOfLength(10)); builder.setTimestampFormats(Arrays.asList(generateRandomStringArray(3, 20, false, false))); builder.setNeedClientTimezone(randomBoolean()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java index 625858c867a45..ba6b590dfc8cd 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java @@ -49,10 +49,12 @@ static DelimitedFileStructureFinder makeDelimitedFileStructureFinder(List 
headerInfo = findHeaderFromSample(explanation, rows); boolean isHeaderInFile = headerInfo.v1(); String[] header = headerInfo.v2(); - String[] headerWithNamedBlanks = new String[header.length]; + // The column names are the header names but with blanks named column1, column2, etc. + String[] columnNames = new String[header.length]; for (int i = 0; i < header.length; ++i) { - String rawHeader = header[i].isEmpty() ? "column" + (i + 1) : header[i]; - headerWithNamedBlanks[i] = trimFields ? rawHeader.trim() : rawHeader; + assert header[i] != null; + String rawHeader = trimFields ? header[i].trim() : header[i]; + columnNames[i] = rawHeader.isEmpty() ? "column" + (i + 1) : rawHeader; } List sampleLines = Arrays.asList(sample.split("\n")); @@ -63,7 +65,7 @@ static DelimitedFileStructureFinder makeDelimitedFileStructureFinder(List row = rows.get(index); int lineNumber = lineNumbers.get(index); Map sampleRecord = new LinkedHashMap<>(); - Util.filterListToMap(sampleRecord, headerWithNamedBlanks, + Util.filterListToMap(sampleRecord, columnNames, trimFields ? row.stream().map(String::trim).collect(Collectors.toList()) : row); sampleRecords.add(sampleRecord); sampleMessages.add( @@ -82,7 +84,7 @@ static DelimitedFileStructureFinder makeDelimitedFileStructureFinder(List findHeaderFromSample(List explanation, L // SuperCSV will put nulls in the header if any columns don't have names, but empty strings are better for us return new Tuple<>(true, firstRow.stream().map(field -> (field == null) ? "" : field).toArray(String[]::new)); } else { - return new Tuple<>(false, IntStream.rangeClosed(1, firstRow.size()).mapToObj(num -> "column" + num).toArray(String[]::new)); + String[] dummyHeader = new String[firstRow.size()]; + Arrays.fill(dummyHeader, ""); + return new Tuple<>(false, dummyHeader); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderTests.java index 6d1f039399eba..4e692d583918e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderTests.java @@ -45,7 +45,7 @@ public void testCreateConfigsGivenCompleteCsv() throws Exception { assertEquals(Character.valueOf(','), structure.getDelimiter()); assertTrue(structure.getHasHeaderRow()); assertNull(structure.getShouldTrimFields()); - assertEquals(Arrays.asList("time", "message"), structure.getInputFields()); + assertEquals(Arrays.asList("time", "message"), structure.getColumnNames()); assertNull(structure.getGrokPattern()); assertEquals("time", structure.getTimestampField()); assertEquals(Collections.singletonList("ISO8601"), structure.getTimestampFormats()); @@ -76,7 +76,7 @@ public void testCreateConfigsGivenCsvWithIncompleteLastRecord() throws Exception assertEquals(Character.valueOf(','), structure.getDelimiter()); assertTrue(structure.getHasHeaderRow()); assertNull(structure.getShouldTrimFields()); - assertEquals(Arrays.asList("message", "time", "count"), structure.getInputFields()); + assertEquals(Arrays.asList("message", "time", "count"), structure.getColumnNames()); assertNull(structure.getGrokPattern()); assertEquals("time", structure.getTimestampField()); assertEquals(Collections.singletonList("ISO8601"), structure.getTimestampFormats()); @@ -114,7 +114,7 @@ public void 
testCreateConfigsGivenCsvWithTrailingNulls() throws Exception { assertNull(structure.getShouldTrimFields()); assertEquals(Arrays.asList("VendorID", "tpep_pickup_datetime", "tpep_dropoff_datetime", "passenger_count", "trip_distance", "RatecodeID", "store_and_fwd_flag", "PULocationID", "DOLocationID", "payment_type", "fare_amount", "extra", "mta_tax", - "tip_amount", "tolls_amount", "improvement_surcharge", "total_amount", "column18", "column19"), structure.getInputFields()); + "tip_amount", "tolls_amount", "improvement_surcharge", "total_amount", "column18", "column19"), structure.getColumnNames()); assertNull(structure.getGrokPattern()); assertEquals("tpep_pickup_datetime", structure.getTimestampField()); assertEquals(Collections.singletonList("YYYY-MM-dd HH:mm:ss"), structure.getTimestampFormats()); @@ -152,7 +152,7 @@ public void testCreateConfigsGivenCsvWithTrailingNullsExceptHeader() throws Exce assertNull(structure.getShouldTrimFields()); assertEquals(Arrays.asList("VendorID", "tpep_pickup_datetime", "tpep_dropoff_datetime", "passenger_count", "trip_distance", "RatecodeID", "store_and_fwd_flag", "PULocationID", "DOLocationID", "payment_type", "fare_amount", "extra", "mta_tax", - "tip_amount", "tolls_amount", "improvement_surcharge", "total_amount"), structure.getInputFields()); + "tip_amount", "tolls_amount", "improvement_surcharge", "total_amount"), structure.getColumnNames()); assertNull(structure.getGrokPattern()); assertEquals("tpep_pickup_datetime", structure.getTimestampField()); assertEquals(Collections.singletonList("YYYY-MM-dd HH:mm:ss"), structure.getTimestampFormats()); @@ -183,7 +183,7 @@ public void testCreateConfigsGivenCsvWithTimeLastColumn() throws Exception { assertEquals(Character.valueOf(','), structure.getDelimiter()); assertTrue(structure.getHasHeaderRow()); assertNull(structure.getShouldTrimFields()); - assertEquals(Arrays.asList("pos_id", "trip_id", "latitude", "longitude", "altitude", "timestamp"), structure.getInputFields()); + assertEquals(Arrays.asList("pos_id", "trip_id", "latitude", "longitude", "altitude", "timestamp"), structure.getColumnNames()); assertNull(structure.getGrokPattern()); assertEquals("timestamp", structure.getTimestampField()); assertEquals(Collections.singletonList("YYYY-MM-dd HH:mm:ss.SSSSSS"), structure.getTimestampFormats()); @@ -213,7 +213,7 @@ public void testFindHeaderFromSampleGivenHeaderNotInSample() throws IOException DelimitedFileStructureFinder.readRows(withoutHeader, CsvPreference.EXCEL_PREFERENCE).v1()); assertFalse(header.v1()); - assertThat(header.v2(), arrayContaining("column1", "column2", "column3", "column4")); + assertThat(header.v2(), arrayContaining("", "", "", "")); } public void testLevenshteinDistance() { diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java index 4f9119df589b1..bb2ed76831da2 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java @@ -39,6 +39,7 @@ import org.elasticsearch.xpack.monitoring.action.TransportMonitoringBulkAction; import org.elasticsearch.xpack.monitoring.cleaner.CleanerService; import org.elasticsearch.xpack.monitoring.collector.Collector; +import org.elasticsearch.xpack.monitoring.collector.ccr.CcrStatsCollector; import org.elasticsearch.xpack.monitoring.collector.cluster.ClusterStatsCollector; import 
org.elasticsearch.xpack.monitoring.collector.indices.IndexRecoveryCollector; import org.elasticsearch.xpack.monitoring.collector.indices.IndexStatsCollector; @@ -142,6 +143,7 @@ public Collection<Object> createComponents(Client client, ClusterService cluster collectors.add(new NodeStatsCollector(settings, clusterService, getLicenseState(), client)); collectors.add(new IndexRecoveryCollector(settings, clusterService, getLicenseState(), client)); collectors.add(new JobStatsCollector(settings, clusterService, getLicenseState(), client)); + collectors.add(new CcrStatsCollector(settings, clusterService, getLicenseState(), client)); final MonitoringService monitoringService = new MonitoringService(settings, clusterService, threadPool, collectors, exporters); @@ -179,6 +181,7 @@ public List<Setting<?>> getSettings() { settings.add(IndexRecoveryCollector.INDEX_RECOVERY_ACTIVE_ONLY); settings.add(IndexStatsCollector.INDEX_STATS_TIMEOUT); settings.add(JobStatsCollector.JOB_STATS_TIMEOUT); + settings.add(CcrStatsCollector.CCR_STATS_TIMEOUT); settings.add(NodeStatsCollector.NODE_STATS_TIMEOUT); settings.addAll(Exporters.getSettings()); return Collections.unmodifiableList(settings);
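// Illustrative note (an assumption from the Collector base-class convention, not stated in this
// diff): CcrStatsCollector.CCR_STATS_TIMEOUT is created by collectionTimeoutSetting("ccr.stats.timeout")
// below, so the registered cluster setting is presumably keyed along the lines of
//     xpack.monitoring.collection.ccr.stats.timeout
// which is why it must be exposed through getSettings() above for nodes to accept it.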
diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsCollector.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsCollector.java new file mode 100644 index 0000000000000..fbb7505af4d07 --- /dev/null +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsCollector.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.monitoring.collector.ccr; + +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.XPackClient; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; +import org.elasticsearch.xpack.core.ccr.client.CcrClient; +import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; +import org.elasticsearch.xpack.monitoring.collector.Collector; + +import java.util.Collection; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.ClientHelper.MONITORING_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; +import static org.elasticsearch.xpack.monitoring.collector.ccr.CcrStatsMonitoringDoc.TYPE; + +public class CcrStatsCollector extends Collector { + + public static final Setting<TimeValue> CCR_STATS_TIMEOUT = collectionTimeoutSetting("ccr.stats.timeout"); + + private final ThreadContext threadContext; + private final CcrClient ccrClient; + + public CcrStatsCollector( + final Settings settings, + final ClusterService clusterService, + final XPackLicenseState licenseState, + final Client client) { + this(settings, clusterService, licenseState, new XPackClient(client).ccr(), client.threadPool().getThreadContext()); + } + + CcrStatsCollector( + final Settings settings, + final ClusterService clusterService, + final XPackLicenseState licenseState, + final CcrClient ccrClient, + final ThreadContext threadContext) { + super(settings, TYPE, clusterService, CCR_STATS_TIMEOUT, licenseState); + this.ccrClient = ccrClient; + this.threadContext = threadContext; + } + + @Override + protected boolean shouldCollect(final boolean isElectedMaster) { + // this can only run when monitoring is allowed and CCR is enabled and allowed, but also only on the elected master node + return isElectedMaster + && super.shouldCollect(isElectedMaster) + && XPackSettings.CCR_ENABLED_SETTING.get(settings) + && licenseState.isCcrAllowed(); + } + + + @Override + protected Collection<MonitoringDoc> doCollect( + final MonitoringDoc.Node node, + final long interval, + final ClusterState clusterState) throws Exception { + try (ThreadContext.StoredContext ignore = stashWithOrigin(threadContext, MONITORING_ORIGIN)) { + final CcrStatsAction.StatsRequest request = new CcrStatsAction.StatsRequest(); + request.setIndices(Strings.EMPTY_ARRAY); + final CcrStatsAction.StatsResponses responses = ccrClient.stats(request).actionGet(getCollectionTimeout()); + + final long timestamp = timestamp(); + final String clusterUuid = clusterUuid(clusterState); + + return responses + .getStatsResponses() + .stream() + .map(stats -> new CcrStatsMonitoringDoc(clusterUuid, timestamp, interval, node, stats.status())) + .collect(Collectors.toList()); + } + } + +} diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsMonitoringDoc.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsMonitoringDoc.java new file mode 100644 index 0000000000000..45c6a8607d473 --- /dev/null +++
b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsMonitoringDoc.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.monitoring.collector.ccr; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; +import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; +import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; + +import java.io.IOException; +import java.util.Objects; + +public class CcrStatsMonitoringDoc extends MonitoringDoc { + + public static final String TYPE = "ccr_stats"; + + private final ShardFollowNodeTaskStatus status; + + public ShardFollowNodeTaskStatus status() { + return status; + } + + public CcrStatsMonitoringDoc( + final String cluster, + final long timestamp, + final long intervalMillis, + final MonitoringDoc.Node node, + final ShardFollowNodeTaskStatus status) { + super(cluster, timestamp, intervalMillis, node, MonitoredSystem.ES, TYPE, null); + this.status = Objects.requireNonNull(status, "status"); + } + + + @Override + protected void innerToXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(TYPE); + { + status.toXContentFragment(builder, params); + } + builder.endObject(); + } + +} diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsCollectorTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsCollectorTests.java new file mode 100644 index 0000000000000..aaf3a61643b5e --- /dev/null +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsCollectorTests.java @@ -0,0 +1,217 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.monitoring.collector.ccr; + +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; +import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; +import org.elasticsearch.xpack.core.ccr.client.CcrClient; +import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; +import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; +import org.elasticsearch.xpack.monitoring.BaseCollectorTestCase; +import org.mockito.ArgumentMatcher; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; + +import static java.util.Collections.emptyList; +import static org.elasticsearch.xpack.monitoring.MonitoringTestUtils.randomMonitoringNode; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Matchers.argThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class CcrStatsCollectorTests extends BaseCollectorTestCase { + + public void testShouldCollectReturnsFalseIfMonitoringNotAllowed() { + final Settings settings = randomFrom(ccrEnabledSettings(), ccrDisabledSettings()); + final boolean ccrAllowed = randomBoolean(); + final boolean isElectedMaster = randomBoolean(); + whenLocalNodeElectedMaster(isElectedMaster); + + // this controls the blockage + when(licenseState.isMonitoringAllowed()).thenReturn(false); + when(licenseState.isCcrAllowed()).thenReturn(ccrAllowed); + + final CcrStatsCollector collector = new CcrStatsCollector(settings, clusterService, licenseState, client); + + assertThat(collector.shouldCollect(isElectedMaster), is(false)); + if (isElectedMaster) { + verify(licenseState).isMonitoringAllowed(); + } + } + + public void testShouldCollectReturnsFalseIfNotMaster() { + // regardless of CCR being enabled + final Settings settings = randomFrom(ccrEnabledSettings(), ccrDisabledSettings()); + + when(licenseState.isMonitoringAllowed()).thenReturn(randomBoolean()); + when(licenseState.isCcrAllowed()).thenReturn(randomBoolean()); + // this controls the blockage + final boolean isElectedMaster = false; + + final CcrStatsCollector collector = new CcrStatsCollector(settings, clusterService, licenseState, client); + + assertThat(collector.shouldCollect(isElectedMaster), is(false)); + } + + public void testShouldCollectReturnsFalseIfCCRIsDisabled() { + // this is controls the blockage + final Settings settings = ccrDisabledSettings(); + + when(licenseState.isMonitoringAllowed()).thenReturn(randomBoolean()); + when(licenseState.isCcrAllowed()).thenReturn(randomBoolean()); + + final boolean isElectedMaster = randomBoolean(); + whenLocalNodeElectedMaster(isElectedMaster); + + final CcrStatsCollector collector = new CcrStatsCollector(settings, clusterService, licenseState, client); + + assertThat(collector.shouldCollect(isElectedMaster), is(false)); + + if (isElectedMaster) { + verify(licenseState).isMonitoringAllowed(); + } + } + + public void 
testShouldCollectReturnsFalseIfCCRIsNotAllowed() { + final Settings settings = randomFrom(ccrEnabledSettings(), ccrDisabledSettings()); + + when(licenseState.isMonitoringAllowed()).thenReturn(randomBoolean()); + // this is controls the blockage + when(licenseState.isCcrAllowed()).thenReturn(false); + final boolean isElectedMaster = randomBoolean(); + whenLocalNodeElectedMaster(isElectedMaster); + + final CcrStatsCollector collector = new CcrStatsCollector(settings, clusterService, licenseState, client); + + assertThat(collector.shouldCollect(isElectedMaster), is(false)); + + if (isElectedMaster) { + verify(licenseState).isMonitoringAllowed(); + } + } + + public void testShouldCollectReturnsTrue() { + final Settings settings = ccrEnabledSettings(); + + when(licenseState.isMonitoringAllowed()).thenReturn(true); + when(licenseState.isCcrAllowed()).thenReturn(true); + final boolean isElectedMaster = true; + + final CcrStatsCollector collector = new CcrStatsCollector(settings, clusterService, licenseState, client); + + assertThat(collector.shouldCollect(isElectedMaster), is(true)); + + verify(licenseState).isMonitoringAllowed(); + } + + public void testDoCollect() throws Exception { + final String clusterUuid = randomAlphaOfLength(5); + whenClusterStateWithUUID(clusterUuid); + + final MonitoringDoc.Node node = randomMonitoringNode(random()); + final CcrClient client = mock(CcrClient.class); + final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + + final TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(1, 120)); + withCollectionTimeout(CcrStatsCollector.CCR_STATS_TIMEOUT, timeout); + + final CcrStatsCollector collector = new CcrStatsCollector(Settings.EMPTY, clusterService, licenseState, client, threadContext); + assertEquals(timeout, collector.getCollectionTimeout()); + + final List statuses = mockStatuses(); + + @SuppressWarnings("unchecked") + final ActionFuture future = (ActionFuture)mock(ActionFuture.class); + final CcrStatsAction.StatsResponses responses = new CcrStatsAction.StatsResponses(emptyList(), emptyList(), statuses); + + final CcrStatsAction.StatsRequest request = new CcrStatsAction.StatsRequest(); + request.setIndices(Strings.EMPTY_ARRAY); + when(client.stats(statsRequestEq(request))).thenReturn(future); + when(future.actionGet(timeout)).thenReturn(responses); + + final long interval = randomNonNegativeLong(); + + final Collection documents = collector.doCollect(node, interval, clusterState); + verify(clusterState).metaData(); + verify(metaData).clusterUUID(); + + assertThat(documents, hasSize(statuses.size())); + + int index = 0; + for (final Iterator it = documents.iterator(); it.hasNext(); index++) { + final CcrStatsMonitoringDoc document = (CcrStatsMonitoringDoc)it.next(); + final CcrStatsAction.StatsResponse status = statuses.get(index); + + assertThat(document.getCluster(), is(clusterUuid)); + assertThat(document.getTimestamp(), greaterThan(0L)); + assertThat(document.getIntervalMillis(), equalTo(interval)); + assertThat(document.getNode(), equalTo(node)); + assertThat(document.getSystem(), is(MonitoredSystem.ES)); + assertThat(document.getType(), is(CcrStatsMonitoringDoc.TYPE)); + assertThat(document.getId(), nullValue()); + assertThat(document.status(), is(status.status())); + } + } + + private List mockStatuses() { + final int count = randomIntBetween(1, 8); + final List statuses = new ArrayList<>(count); + + for (int i = 0; i < count; ++i) { + CcrStatsAction.StatsResponse statsResponse = mock(CcrStatsAction.StatsResponse.class); + 
ShardFollowNodeTaskStatus status = mock(ShardFollowNodeTaskStatus.class); + when(statsResponse.status()).thenReturn(status); + statuses.add(statsResponse); + } + + return statuses; + } + + private Settings ccrEnabledSettings() { + // since it's the default, we want to ensure we test both with/without it + return randomBoolean() ? Settings.EMPTY : Settings.builder().put(XPackSettings.CCR_ENABLED_SETTING.getKey(), true).build(); + } + + private Settings ccrDisabledSettings() { + return Settings.builder().put(XPackSettings.CCR_ENABLED_SETTING.getKey(), false).build(); + } + + private static CcrStatsAction.StatsRequest statsRequestEq(CcrStatsAction.StatsRequest expected) { + return argThat(new StatsRequestMatches(expected)); + } + + private static class StatsRequestMatches extends ArgumentMatcher<CcrStatsAction.StatsRequest> { + + private final CcrStatsAction.StatsRequest expected; + + private StatsRequestMatches(CcrStatsAction.StatsRequest expected) { + this.expected = expected; + } + + @Override + public boolean matches(Object o) { + CcrStatsAction.StatsRequest actual = (CcrStatsAction.StatsRequest) o; + return Arrays.equals(expected.indices(), actual.indices()); + } + } + +}
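For orientation between the two new test files: the assertions above pin down the gating in CcrStatsCollector#shouldCollect, whose implementation is not part of this excerpt. A minimal hedged sketch of the shape those tests imply; the field names `settings` and `licenseState` follow the usual collector conventions and are assumptions here, not code from the PR:

```java
// Hedged sketch inferred from the tests above, not copied from the PR.
// Collection requires an elected master, a license that allows monitoring,
// the CCR setting switched on, and a license that allows CCR.
@Override
protected boolean shouldCollect(final boolean isElectedMaster) {
    return isElectedMaster                                      // testShouldCollectReturnsFalseIfNotMaster
            && super.shouldCollect(isElectedMaster)             // checks licenseState.isMonitoringAllowed()
            && XPackSettings.CCR_ENABLED_SETTING.get(settings)  // ccrEnabledSettings() vs ccrDisabledSettings()
            && licenseState.isCcrAllowed();                     // testShouldCollectReturnsFalseIfCCRIsNotAllowed
}
```

The ordering matters to the tests: `isMonitoringAllowed()` is only verified when the node is the elected master, which matches the short-circuiting above.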
diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsMonitoringDocTests.java new file mode 100644 index 0000000000000..47f2bdf5d2e50 --- /dev/null +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsMonitoringDocTests.java @@ -0,0 +1,175 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.monitoring.collector.ccr; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; +import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; +import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; +import org.elasticsearch.xpack.monitoring.exporter.BaseMonitoringDocTestCase; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collections; +import java.util.NavigableMap; +import java.util.TreeMap; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; + +public class CcrStatsMonitoringDocTests extends BaseMonitoringDocTestCase<CcrStatsMonitoringDoc> { + + private ShardFollowNodeTaskStatus status; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + status = mock(ShardFollowNodeTaskStatus.class); + } + + public void testConstructorStatusMustNotBeNull() { + final NullPointerException e = + expectThrows(NullPointerException.class, () -> new CcrStatsMonitoringDoc(cluster, timestamp, interval, node, null)); + assertThat(e, hasToString(containsString("status"))); + } + + @Override + protected CcrStatsMonitoringDoc createMonitoringDoc( + final String cluster, + final long timestamp, + final long interval, + final MonitoringDoc.Node node, + final MonitoredSystem system, + final String type, + final String id) { + return new CcrStatsMonitoringDoc(cluster, timestamp, interval, node, status); + } + + @Override + protected void assertMonitoringDoc(CcrStatsMonitoringDoc document) { + assertThat(document.getSystem(), is(MonitoredSystem.ES)); + assertThat(document.getType(), is(CcrStatsMonitoringDoc.TYPE)); + assertThat(document.getId(), nullValue()); + assertThat(document.status(), is(status)); + } + + @Override + public void testToXContent() throws IOException { + final long timestamp = System.currentTimeMillis(); + final long intervalMillis = System.currentTimeMillis(); + final long nodeTimestamp = System.currentTimeMillis(); + final MonitoringDoc.Node node = new MonitoringDoc.Node("_uuid", "_host", "_addr", "_ip", "_name", nodeTimestamp); + // these random values do not need to be internally consistent, they are only for testing formatting + final int shardId = randomIntBetween(0, Integer.MAX_VALUE); + final long leaderGlobalCheckpoint = randomNonNegativeLong(); + final long leaderMaxSeqNo = randomNonNegativeLong(); + final long followerGlobalCheckpoint = randomNonNegativeLong(); + final long followerMaxSeqNo = randomNonNegativeLong(); + final long lastRequestedSeqNo = randomNonNegativeLong(); + final int numberOfConcurrentReads = randomIntBetween(1, Integer.MAX_VALUE); + final int numberOfConcurrentWrites = randomIntBetween(1, Integer.MAX_VALUE); + final int numberOfQueuedWrites = randomIntBetween(0, Integer.MAX_VALUE); + final long mappingVersion = randomIntBetween(0, Integer.MAX_VALUE); + final long totalFetchTimeMillis = randomLongBetween(0, 4096); + final long numberOfSuccessfulFetches = randomNonNegativeLong(); + final long numberOfFailedFetches = randomLongBetween(0, 8); + final long 
operationsReceived = randomNonNegativeLong(); + final long totalTransferredBytes = randomNonNegativeLong(); + final long totalIndexTimeMillis = randomNonNegativeLong(); + final long numberOfSuccessfulBulkOperations = randomNonNegativeLong(); + final long numberOfFailedBulkOperations = randomNonNegativeLong(); + final long numberOfOperationsIndexed = randomNonNegativeLong(); + final NavigableMap<Long, ElasticsearchException> fetchExceptions = + new TreeMap<>(Collections.singletonMap(randomNonNegativeLong(), new ElasticsearchException("shard is sad"))); + final long timeSinceLastFetchMillis = randomNonNegativeLong(); + final ShardFollowNodeTaskStatus status = new ShardFollowNodeTaskStatus( + "cluster_alias:leader_index", + shardId, + leaderGlobalCheckpoint, + leaderMaxSeqNo, + followerGlobalCheckpoint, + followerMaxSeqNo, + lastRequestedSeqNo, + numberOfConcurrentReads, + numberOfConcurrentWrites, + numberOfQueuedWrites, + mappingVersion, + totalFetchTimeMillis, + numberOfSuccessfulFetches, + numberOfFailedFetches, + operationsReceived, + totalTransferredBytes, + totalIndexTimeMillis, + numberOfSuccessfulBulkOperations, + numberOfFailedBulkOperations, + numberOfOperationsIndexed, + fetchExceptions, + timeSinceLastFetchMillis); + final CcrStatsMonitoringDoc document = new CcrStatsMonitoringDoc("_cluster", timestamp, intervalMillis, node, status); + final BytesReference xContent = XContentHelper.toXContent(document, XContentType.JSON, false); + assertThat( + xContent.utf8ToString(), + equalTo( + "{" + + "\"cluster_uuid\":\"_cluster\"," + + "\"timestamp\":\"" + new DateTime(timestamp, DateTimeZone.UTC).toString() + "\"," + + "\"interval_ms\":" + intervalMillis + "," + + "\"type\":\"ccr_stats\"," + + "\"source_node\":{" + + "\"uuid\":\"_uuid\"," + + "\"host\":\"_host\"," + + "\"transport_address\":\"_addr\"," + + "\"ip\":\"_ip\"," + + "\"name\":\"_name\"," + + "\"timestamp\":\"" + new DateTime(nodeTimestamp, DateTimeZone.UTC).toString() + "\"" + + "}," + + "\"ccr_stats\":{" + + "\"leader_index\":\"cluster_alias:leader_index\"," + + "\"shard_id\":" + shardId + "," + + "\"leader_global_checkpoint\":" + leaderGlobalCheckpoint + "," + + "\"leader_max_seq_no\":" + leaderMaxSeqNo + "," + + "\"follower_global_checkpoint\":" + followerGlobalCheckpoint + "," + + "\"follower_max_seq_no\":" + followerMaxSeqNo + "," + + "\"last_requested_seq_no\":" + lastRequestedSeqNo + "," + + "\"number_of_concurrent_reads\":" + numberOfConcurrentReads + "," + + "\"number_of_concurrent_writes\":" + numberOfConcurrentWrites + "," + + "\"number_of_queued_writes\":" + numberOfQueuedWrites + "," + + "\"mapping_version\":" + mappingVersion + "," + + "\"total_fetch_time_millis\":" + totalFetchTimeMillis + "," + + "\"number_of_successful_fetches\":" + numberOfSuccessfulFetches + "," + + "\"number_of_failed_fetches\":" + numberOfFailedFetches + "," + + "\"operations_received\":" + operationsReceived + "," + + "\"total_transferred_bytes\":" + totalTransferredBytes + "," + + "\"total_index_time_millis\":" + totalIndexTimeMillis + "," + + "\"number_of_successful_bulk_operations\":" + numberOfSuccessfulBulkOperations + "," + + "\"number_of_failed_bulk_operations\":" + numberOfFailedBulkOperations + "," + + "\"number_of_operations_indexed\":" + numberOfOperationsIndexed + "," + + "\"fetch_exceptions\":[" + + "{" + + "\"from_seq_no\":" + fetchExceptions.keySet().iterator().next() + "," + + "\"exception\":{" + + "\"type\":\"exception\"," + + "\"reason\":\"shard is sad\"" + + "}" + + "}" + + "]," + + "\"time_since_last_fetch_millis\":" + timeSinceLastFetchMillis + + "}" + + "}")); + } + +}
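One note on the timestamp assertions in the expected JSON above: they rely on Joda's default ISO-8601 printer. A tiny illustration of the exact string shape (this demo class is invented, not part of the change):

```java
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

public class TimestampFormatDemo {
    public static void main(String[] args) {
        // Joda's DateTime#toString prints ISO-8601 with millisecond precision and zone,
        // which is what the "timestamp" fields in the expected JSON embed.
        System.out.println(new DateTime(0L, DateTimeZone.UTC)); // 1970-01-01T00:00:00.000Z
    }
}
```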
"}" + + "}")); + } + +} diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-4d78db26be.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-4d78db26be.jar.sha1 deleted file mode 100644 index 683b585bb2f61..0000000000000 --- a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-4d78db26be.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -126faacb28d1b8cc1ab81d702973d057892120d1 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-66c671ea80.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-66c671ea80.jar.sha1 new file mode 100644 index 0000000000000..50a21f5c504a2 --- /dev/null +++ b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-66c671ea80.jar.sha1 @@ -0,0 +1 @@ +06c1e4fa838807059d27aaf5405cfdfe7303369c \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Attribute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Attribute.java index 5143a7eceb415..dd18363b2a828 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Attribute.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Attribute.java @@ -7,30 +7,28 @@ import org.elasticsearch.xpack.sql.tree.Location; +import java.util.List; import java.util.Objects; import static java.util.Collections.emptyList; -import java.util.List; - /** - * {@link Expression}s that can be converted into Elasticsearch - * sorts, aggregations, or queries. They can also be extracted - * from the result of a search. + * {@link Expression}s that can be materialized and represent the result columns sent to the client. + * Typically are converted into constants, functions or Elasticsearch order-bys, + * aggregations, or queries. They can also be extracted from the result of a search. * * In the statement {@code SELECT ABS(foo), A, B+C FROM ...} the three named - * expressions (ABS(foo), A, B+C) get converted to attributes and the user can + * expressions {@code ABS(foo), A, B+C} get converted to attributes and the user can * only see Attributes. * - * In the statement {@code SELECT foo FROM TABLE WHERE foo > 10 + 1} 10+1 is an - * expression. It's not named - meaning there's no alias for it (defined by the - * user) and as such there's no attribute - no column to be returned to the user. - * It's an expression used for filtering so it doesn't appear in the result set - * (derived table). "foo" on the other hand is an expression, a named expression - * (it has a name) and also an attribute - it's a column in the result set. + * In the statement {@code SELECT foo FROM TABLE WHERE foo > 10 + 1} both {@code foo} and + * {@code 10 + 1} are named expressions, the first due to the SELECT, the second due to being a function. + * However since {@code 10 + 1} is used for filtering it doesn't appear appear in the result set + * (derived table) and as such it is never translated to an attribute. + * "foo" on the other hand is since it's a column in the result set. * - * Another example {@code SELECT foo FROM ... WHERE bar > 10 +1} "foo" gets - * converted into an Attribute, bar does not. That's because bar is used for + * Another example {@code SELECT foo FROM ... WHERE bar > 10 +1} {@code foo} gets + * converted into an Attribute, bar does not. 
That's because {@code bar} is used for * filtering alone; it's not part of the projection, meaning the user doesn't * need it in the derived table. */ diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java index 5851e99131435..1b326e0474fd2 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java @@ -99,7 +99,7 @@ public static Attribute attribute(Expression e) { return ((NamedExpression) e).toAttribute(); } if (e != null && e.foldable()) { - return new LiteralAttribute(Literal.of(e)); + return Literal.of(e).toAttribute(); } return null; } @@ -120,4 +120,4 @@ public static TypeResolution typeMustBeNumeric(Expression e) { return e.dataType().isNumeric()? TypeResolution.TYPE_RESOLVED : new TypeResolution( "Argument required to be numeric ('" + Expressions.name(e) + "' of type '" + e.dataType().esType + "')"); -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Literal.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Literal.java index 9a4ffce929592..4badfc7091c58 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Literal.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Literal.java @@ -12,9 +12,16 @@ import org.elasticsearch.xpack.sql.type.DataTypeConversion; import org.elasticsearch.xpack.sql.type.DataTypes; +import java.util.List; import java.util.Objects; -public class Literal extends LeafExpression { +import static java.util.Collections.emptyList; + +/** + * SQL Literal or constant. + */ +public class Literal extends NamedExpression { + public static final Literal TRUE = Literal.of(Location.EMPTY, Boolean.TRUE); public static final Literal FALSE = Literal.of(Location.EMPTY, Boolean.FALSE); @@ -22,7 +29,11 @@ public class Literal extends LeafExpression { private final DataType dataType; public Literal(Location location, Object value, DataType dataType) { - super(location); + this(location, null, value, dataType); + } + + public Literal(Location location, String name, Object value, DataType dataType) { + super(location, name == null ? 
String.valueOf(value) : name, emptyList(), null); this.dataType = dataType; this.value = DataTypeConversion.convert(value, dataType); } @@ -61,10 +72,24 @@ public Object fold() { return value; } + @Override + public Attribute toAttribute() { + return new LiteralAttribute(location(), name(), null, false, id(), false, dataType, this); + } + + @Override + public Expression replaceChildren(List<Expression> newChildren) { + throw new UnsupportedOperationException("this type of node doesn't have any children to replace"); + } + + @Override + public AttributeSet references() { + return AttributeSet.EMPTY; + } @Override public int hashCode() { - return Objects.hash(value, dataType); + return Objects.hash(name(), value, dataType); } @Override @@ -72,21 +97,25 @@ public boolean equals(Object obj) { if (this == obj) { return true; } - if (obj == null || getClass() != obj.getClass()) { return false; } Literal other = (Literal) obj; - return Objects.equals(value, other.value) + return Objects.equals(name(), other.name()) + && Objects.equals(value, other.value) && Objects.equals(dataType, other.dataType); } @Override public String toString() { - return Objects.toString(value); + String s = String.valueOf(value); + return name().equals(s) ? s : name() + "=" + value; } + /** + * Utility method for creating 'in-line' Literals (out of values instead of expressions). + */ public static Literal of(Location loc, Object value) { if (value instanceof Literal) { return (Literal) value; @@ -94,15 +123,32 @@ public static Literal of(Location loc, Object value) { return new Literal(loc, value, DataTypes.fromJava(value)); } + /** + * Utility method for creating a literal out of a foldable expression. + * Throws an exception if the expression is not foldable. + */ public static Literal of(Expression foldable) { - if (foldable instanceof Literal) { - return (Literal) foldable; - } + return of((String) null, foldable); + } + public static Literal of(String name, Expression foldable) { if (!foldable.foldable()) { throw new SqlIllegalArgumentException("Foldable expression required for Literal creation; received unfoldable " + foldable); } - return new Literal(foldable.location(), foldable.fold(), foldable.dataType()); + if (foldable instanceof Literal) { + Literal l = (Literal) foldable; + if (name == null || l.name().equals(name)) { + return l; + } + } + + Object fold = foldable.fold(); + + if (name == null) { + name = foldable instanceof NamedExpression ? 
((NamedExpression) foldable).name() : String.valueOf(fold); + } + + return new Literal(foldable.location(), name, fold, foldable.dataType()); } -} +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/LiteralAttribute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/LiteralAttribute.java index ff07731b82ebd..a6483458a6b27 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/LiteralAttribute.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/LiteralAttribute.java @@ -15,20 +15,12 @@ public class LiteralAttribute extends TypedAttribute { private final Literal literal; - public LiteralAttribute(Literal literal) { - this(literal.location(), String.valueOf(literal.fold()), null, false, null, false, literal.dataType(), literal); - } - public LiteralAttribute(Location location, String name, String qualifier, boolean nullable, ExpressionId id, boolean synthetic, DataType dataType, Literal literal) { super(location, name, dataType, qualifier, nullable, id, synthetic); this.literal = literal; } - public Literal literal() { - return literal; - } - @Override protected NodeInfo info() { return NodeInfo.create(this, LiteralAttribute::new, @@ -49,4 +41,4 @@ public ProcessorDefinition asProcessorDefinition() { protected String label() { return "c"; } -} +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java index c9d652861f800..820aafb011628 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java @@ -21,13 +21,16 @@ import org.elasticsearch.xpack.sql.expression.function.aggregate.SumOfSquares; import org.elasticsearch.xpack.sql.expression.function.aggregate.VarPop; import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Mod; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DayName; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DayOfMonth; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DayOfWeek; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DayOfYear; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.HourOfDay; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.MinuteOfDay; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.MinuteOfHour; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.MonthName; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.MonthOfYear; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.Quarter; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.SecondOfMinute; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.WeekOfYear; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.Year; @@ -62,21 +65,21 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.string.BitLength; import org.elasticsearch.xpack.sql.expression.function.scalar.string.Char; import org.elasticsearch.xpack.sql.expression.function.scalar.string.CharLength; -import 
org.elasticsearch.xpack.sql.expression.function.scalar.string.LCase; -import org.elasticsearch.xpack.sql.expression.function.scalar.string.LTrim; -import org.elasticsearch.xpack.sql.expression.function.scalar.string.Length; -import org.elasticsearch.xpack.sql.expression.function.scalar.string.RTrim; -import org.elasticsearch.xpack.sql.expression.function.scalar.string.Space; -import org.elasticsearch.xpack.sql.expression.function.scalar.string.UCase; import org.elasticsearch.xpack.sql.expression.function.scalar.string.Concat; import org.elasticsearch.xpack.sql.expression.function.scalar.string.Insert; +import org.elasticsearch.xpack.sql.expression.function.scalar.string.LCase; +import org.elasticsearch.xpack.sql.expression.function.scalar.string.LTrim; import org.elasticsearch.xpack.sql.expression.function.scalar.string.Left; +import org.elasticsearch.xpack.sql.expression.function.scalar.string.Length; import org.elasticsearch.xpack.sql.expression.function.scalar.string.Locate; import org.elasticsearch.xpack.sql.expression.function.scalar.string.Position; +import org.elasticsearch.xpack.sql.expression.function.scalar.string.RTrim; import org.elasticsearch.xpack.sql.expression.function.scalar.string.Repeat; import org.elasticsearch.xpack.sql.expression.function.scalar.string.Replace; import org.elasticsearch.xpack.sql.expression.function.scalar.string.Right; +import org.elasticsearch.xpack.sql.expression.function.scalar.string.Space; import org.elasticsearch.xpack.sql.expression.function.scalar.string.Substring; +import org.elasticsearch.xpack.sql.expression.function.scalar.string.UCase; import org.elasticsearch.xpack.sql.parser.ParsingException; import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.util.StringUtils; @@ -123,6 +126,9 @@ public class FunctionRegistry { def(MonthOfYear.class, MonthOfYear::new, "MONTH"), def(Year.class, Year::new), def(WeekOfYear.class, WeekOfYear::new, "WEEK"), + def(DayName.class, DayName::new, "DAYNAME"), + def(MonthName.class, MonthName::new, "MONTHNAME"), + def(Quarter.class, Quarter::new), // Math def(Abs.class, Abs::new), def(ACos.class, ACos::new), diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java index 0f36654fa4aff..a62aadab46705 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java @@ -10,6 +10,8 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.BinaryArithmeticProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.UnaryArithmeticProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NamedDateTimeProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.QuarterProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryMathProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.BucketExtractorProcessor; @@ -17,13 +19,13 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.ConstantProcessor; import 
org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.HitExtractorProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; -import org.elasticsearch.xpack.sql.expression.function.scalar.string.StringProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.string.BinaryStringNumericProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.string.BinaryStringStringProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.string.ConcatFunctionProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.string.InsertFunctionProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.string.LocateFunctionProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.string.ReplaceFunctionProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.string.StringProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.string.SubstringFunctionProcessor; import java.util.ArrayList; @@ -52,6 +54,8 @@ public static List getNamedWriteables() { entries.add(new Entry(Processor.class, BinaryMathProcessor.NAME, BinaryMathProcessor::new)); // datetime entries.add(new Entry(Processor.class, DateTimeProcessor.NAME, DateTimeProcessor::new)); + entries.add(new Entry(Processor.class, NamedDateTimeProcessor.NAME, NamedDateTimeProcessor::new)); + entries.add(new Entry(Processor.class, QuarterProcessor.NAME, QuarterProcessor::new)); // math entries.add(new Entry(Processor.class, MathProcessor.NAME, MathProcessor::new)); // string diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/ScalarFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/ScalarFunction.java index 8462ee293cc48..e7b8529557f4d 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/ScalarFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/ScalarFunction.java @@ -68,8 +68,9 @@ protected ScriptTemplate asScript(Expression exp) { if (attr instanceof AggregateFunctionAttribute) { return asScriptFrom((AggregateFunctionAttribute) attr); } - // fall-back to - return asScriptFrom((FieldAttribute) attr); + if (attr instanceof FieldAttribute) { + return asScriptFrom((FieldAttribute) attr); + } } throw new SqlIllegalArgumentException("Cannot evaluate script for expression {}", exp); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/ArithmeticFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/ArithmeticFunction.java index 5715e19963cbc..e95fec863971b 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/ArithmeticFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/ArithmeticFunction.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic; import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Literal; import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.BinaryArithmeticProcessor.BinaryArithmeticOperation; import 
org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryNumericFunction; @@ -65,7 +66,7 @@ protected ProcessorDefinition makeProcessorDefinition() { public String name() { StringBuilder sb = new StringBuilder(); sb.append("("); - sb.append(left()); + sb.append(Expressions.name(left())); if (!(left() instanceof Literal)) { sb.insert(1, "("); sb.append(")"); @@ -74,7 +75,7 @@ public String name() { sb.append(operation); sb.append(" "); int pos = sb.length(); - sb.append(right()); + sb.append(Expressions.name(right())); if (!(right() instanceof Literal)) { sb.insert(pos, "("); sb.append(")"); @@ -87,8 +88,4 @@ public String name() { public String toString() { return name() + "#" + functionId(); } - - protected boolean useParanthesis() { - return !(left() instanceof Literal) || !(right() instanceof Literal); - } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeFunction.java new file mode 100644 index 0000000000000..2213fad8c8d9f --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeFunction.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunctionAttribute; +import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.util.TimeZone; + +abstract class BaseDateTimeFunction extends UnaryScalarFunction { + + private final TimeZone timeZone; + private final String name; + + BaseDateTimeFunction(Location location, Expression field, TimeZone timeZone) { + super(location, field); + this.timeZone = timeZone; + + StringBuilder sb = new StringBuilder(super.name()); + // add timezone as last argument + sb.insert(sb.length() - 1, " [" + timeZone.getID() + "]"); + + this.name = sb.toString(); + } + + @Override + protected final NodeInfo<BaseDateTimeFunction> info() { + return NodeInfo.create(this, ctorForInfo(), field(), timeZone()); + } + + protected abstract NodeInfo.NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo(); + + @Override + protected TypeResolution resolveType() { + if (field().dataType() == DataType.DATE) { + return TypeResolution.TYPE_RESOLVED; + } + return new TypeResolution("Function [" + functionName() + "] cannot be applied on a non-date expression ([" + + Expressions.name(field()) + "] of type [" + field().dataType().esType + "])"); + } + + public TimeZone timeZone() { + return timeZone; + } + + @Override + public String name() { + return name; + } + + @Override + public boolean foldable() { + return field().foldable(); + } + + @Override + protected ScriptTemplate asScriptFrom(AggregateFunctionAttribute aggregate) { + throw new UnsupportedOperationException(); + } +}
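Worth calling out in BaseDateTimeFunction is the name-building in the constructor: the time zone id is spliced in just before the closing parenthesis of the upstream name. A hedged illustration with invented function and column names:

```java
import java.util.TimeZone;

public class NameBuildingDemo {
    public static void main(String[] args) {
        // super.name() would look like "DAY_NAME(birth_date)"; inserting at
        // length() - 1 places the zone id inside the trailing parenthesis.
        StringBuilder sb = new StringBuilder("DAY_NAME(birth_date)");
        sb.insert(sb.length() - 1, " [" + TimeZone.getTimeZone("Europe/Paris").getID() + "]");
        System.out.println(sb); // DAY_NAME(birth_date [Europe/Paris])
    }
}
```

diff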
--git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeProcessor.java new file mode 100644 index 0000000000000..95547ded22274 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeProcessor.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; +import org.joda.time.ReadableInstant; + +import java.io.IOException; +import java.util.TimeZone; + +public abstract class BaseDateTimeProcessor implements Processor { + + private final TimeZone timeZone; + + BaseDateTimeProcessor(TimeZone timeZone) { + this.timeZone = timeZone; + } + + BaseDateTimeProcessor(StreamInput in) throws IOException { + timeZone = TimeZone.getTimeZone(in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(timeZone.getID()); + } + + TimeZone timeZone() { + return timeZone; + } + + @Override + public Object process(Object l) { + if (l == null) { + return null; + } + long millis; + if (l instanceof String) { + // 6.4+ + millis = Long.parseLong(l.toString()); + } else if (l instanceof ReadableInstant) { + // 6.3- + millis = ((ReadableInstant) l).getMillis(); + } else { + throw new SqlIllegalArgumentException("A string or a date is required; received {}", l); + } + + return doProcess(millis); + } + + abstract Object doProcess(long millis); +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFunction.java index 606728222787b..d87e15084a422 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFunction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFunction.java @@ -6,10 +6,7 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.FieldAttribute; -import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunctionAttribute; -import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor; import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinitions; @@ -17,7 +14,6 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder; import 
org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; import org.elasticsearch.xpack.sql.tree.Location; -import org.elasticsearch.xpack.sql.tree.NodeInfo; import org.elasticsearch.xpack.sql.type.DataType; import org.joda.time.DateTime; @@ -31,45 +27,10 @@ import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder.paramsBuilder; import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate.formatTemplate; -public abstract class DateTimeFunction extends UnaryScalarFunction { - - private final TimeZone timeZone; - private final String name; +public abstract class DateTimeFunction extends BaseDateTimeFunction { DateTimeFunction(Location location, Expression field, TimeZone timeZone) { - super(location, field); - this.timeZone = timeZone; - - StringBuilder sb = new StringBuilder(super.name()); - // add timezone as last argument - sb.insert(sb.length() - 1, " [" + timeZone.getID() + "]"); - - this.name = sb.toString(); - } - - @Override - protected final NodeInfo info() { - return NodeInfo.create(this, ctorForInfo(), field(), timeZone()); - } - - protected abstract NodeInfo.NodeCtor2 ctorForInfo(); - - @Override - protected TypeResolution resolveType() { - if (field().dataType() == DataType.DATE) { - return TypeResolution.TYPE_RESOLVED; - } - return new TypeResolution("Function [" + functionName() + "] cannot be applied on a non-date expression ([" - + Expressions.name(field()) + "] of type [" + field().dataType().esType + "])"); - } - - public TimeZone timeZone() { - return timeZone; - } - - @Override - public boolean foldable() { - return field().foldable(); + super(location, field, timeZone); } @Override @@ -79,7 +40,7 @@ public Object fold() { return null; } - return dateTimeChrono(folded.getMillis(), timeZone.getID(), chronoField().name()); + return dateTimeChrono(folded.getMillis(), timeZone().getID(), chronoField().name()); } public static Integer dateTimeChrono(long millis, String tzId, String chronoName) { @@ -94,27 +55,21 @@ protected ScriptTemplate asScriptFrom(FieldAttribute field) { String template = null; template = formatTemplate("{sql}.dateTimeChrono(doc[{}].value.millis, {}, {})"); params.variable(field.name()) - .variable(timeZone.getID()) + .variable(timeZone().getID()) .variable(chronoField().name()); return new ScriptTemplate(template, params.build(), dataType()); } - - @Override - protected ScriptTemplate asScriptFrom(AggregateFunctionAttribute aggregate) { - throw new UnsupportedOperationException(); - } - /** * Used for generating the painless script version of this function when the time zone is not UTC */ protected abstract ChronoField chronoField(); @Override - protected final ProcessorDefinition makeProcessorDefinition() { + protected ProcessorDefinition makeProcessorDefinition() { return new UnaryProcessorDefinition(location(), this, ProcessorDefinitions.toProcessorDefinition(field()), - new DateTimeProcessor(extractor(), timeZone)); + new DateTimeProcessor(extractor(), timeZone())); } protected abstract DateTimeExtractor extractor(); @@ -127,12 +82,6 @@ public DataType dataType() { // used for applying ranges public abstract String dateTimeFormat(); - // add tz along the rest of the params - @Override - public String name() { - return name; - } - @Override public boolean equals(Object obj) { if (obj == null || obj.getClass() != getClass()) { @@ -140,11 +89,11 @@ public boolean equals(Object obj) { } DateTimeFunction other = (DateTimeFunction) obj; return Objects.equals(other.field(), 
field()) - && Objects.equals(other.timeZone, timeZone); + && Objects.equals(other.timeZone(), timeZone()); } @Override public int hashCode() { - return Objects.hash(field(), timeZone); + return Objects.hash(field(), timeZone()); } } \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java index d135b8a086566..d34b1c1e39053 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java @@ -7,19 +7,16 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; -import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; import org.joda.time.DateTime; import org.joda.time.DateTimeFieldType; import org.joda.time.DateTimeZone; import org.joda.time.ReadableDateTime; -import org.joda.time.ReadableInstant; import java.io.IOException; import java.util.Objects; import java.util.TimeZone; -public class DateTimeProcessor implements Processor { +public class DateTimeProcessor extends BaseDateTimeProcessor { public enum DateTimeExtractor { DAY_OF_MONTH(DateTimeFieldType.dayOfMonth()), @@ -45,24 +42,22 @@ public int extract(ReadableDateTime dt) { } public static final String NAME = "dt"; - private final DateTimeExtractor extractor; - private final TimeZone timeZone; public DateTimeProcessor(DateTimeExtractor extractor, TimeZone timeZone) { + super(timeZone); this.extractor = extractor; - this.timeZone = timeZone; } public DateTimeProcessor(StreamInput in) throws IOException { + super(in); extractor = in.readEnum(DateTimeExtractor.class); - timeZone = TimeZone.getTimeZone(in.readString()); } @Override public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); out.writeEnum(extractor); - out.writeString(timeZone.getID()); } @Override @@ -75,32 +70,15 @@ DateTimeExtractor extractor() { } @Override - public Object process(Object l) { - if (l == null) { - return null; - } - - ReadableDateTime dt; - if (l instanceof String) { - // 6.4+ - final long millis = Long.parseLong(l.toString()); - dt = new DateTime(millis, DateTimeZone.forTimeZone(timeZone)); - } else if (l instanceof ReadableInstant) { - // 6.3- - dt = (ReadableDateTime) l; - if (!TimeZone.getTimeZone("UTC").equals(timeZone)) { - dt = dt.toDateTime().withZone(DateTimeZone.forTimeZone(timeZone)); - } - } else { - throw new SqlIllegalArgumentException("A string or a date is required; received {}", l); - } + public Object doProcess(long millis) { + ReadableDateTime dt = new DateTime(millis, DateTimeZone.forTimeZone(timeZone())); return extractor.extract(dt); } @Override public int hashCode() { - return Objects.hash(extractor, timeZone); + return Objects.hash(extractor, timeZone()); } @Override @@ -110,7 +88,7 @@ public boolean equals(Object obj) { } DateTimeProcessor other = (DateTimeProcessor) obj; return Objects.equals(extractor, other.extractor) - && Objects.equals(timeZone, other.timeZone); + && Objects.equals(timeZone(), other.timeZone()); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayName.java 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayName.java new file mode 100644 index 0000000000000..2f5ba7eeaca9f --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayName.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NamedDateTimeProcessor.NameExtractor; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2; + +import java.util.TimeZone; + +/** + * Extract the day of the week from a datetime in text format (Monday, Tuesday etc.) + */ +public class DayName extends NamedDateTimeFunction { + protected static final String DAY_NAME_FORMAT = "EEEE"; + + public DayName(Location location, Expression field, TimeZone timeZone) { + super(location, field, timeZone); + } + + @Override + protected NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo() { + return DayName::new; + } + + @Override + protected DayName replaceChild(Expression newChild) { + return new DayName(location(), newChild, timeZone()); + } + + @Override + protected String dateTimeFormat() { + return DAY_NAME_FORMAT; + } + + @Override + protected NameExtractor nameExtractor() { + return NameExtractor.DAY_NAME; + } + + @Override + public String extractName(long millis, String tzId) { + return nameExtractor().extract(millis, tzId); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfMonth.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfMonth.java index 1ac3771d49db1..ebb576b4648e1 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfMonth.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfMonth.java @@ -22,7 +22,7 @@ public DayOfMonth(Location location, Expression field, TimeZone timeZone) { } @Override - protected NodeCtor2<Expression, TimeZone, DateTimeFunction> ctorForInfo() { + protected NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo() { return DayOfMonth::new; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfWeek.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfWeek.java index 7582ece6250bd..d840d4d71df0a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfWeek.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfWeek.java @@ -22,7 +22,7 @@ public DayOfWeek(Location location, Expression field, TimeZone timeZone) { } @Override - protected NodeCtor2<Expression, TimeZone, DateTimeFunction> ctorForInfo() { + protected NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo() { return DayOfWeek::new; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfYear.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfYear.java index 8f5e06188327d..1fa248d9c2063 --- 
a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfYear.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfYear.java @@ -23,7 +23,7 @@ public DayOfYear(Location location, Expression field, TimeZone timeZone) { } @Override - protected NodeCtor2<Expression, TimeZone, DateTimeFunction> ctorForInfo() { + protected NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo() { return DayOfYear::new; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/HourOfDay.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/HourOfDay.java index 5a2bc681ab882..4df28bddad088 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/HourOfDay.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/HourOfDay.java @@ -22,7 +22,7 @@ public HourOfDay(Location location, Expression field, TimeZone timeZone) { } @Override - protected NodeCtor2<Expression, TimeZone, DateTimeFunction> ctorForInfo() { + protected NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo() { return HourOfDay::new; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfDay.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfDay.java index 2840fa0c21b85..ef0fb0bce18aa 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfDay.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfDay.java @@ -23,7 +23,7 @@ public MinuteOfDay(Location location, Expression field, TimeZone timeZone) { } @Override - protected NodeCtor2<Expression, TimeZone, DateTimeFunction> ctorForInfo() { + protected NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo() { return MinuteOfDay::new; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfHour.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfHour.java index d577bb916966a..f5ab095ef2455 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfHour.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfHour.java @@ -22,7 +22,7 @@ public MinuteOfHour(Location location, Expression field, TimeZone timeZone) { } @Override - protected NodeCtor2<Expression, TimeZone, DateTimeFunction> ctorForInfo() { + protected NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo() { return MinuteOfHour::new; }
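Before the name-returning functions that follow, a hedged java.time sketch (not part of the diff) of the two formatter patterns they use, DayName.DAY_NAME_FORMAT ("EEEE") and MonthName.MONTH_NAME_FORMAT ("MMMM"):

```java
import java.time.Instant;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Locale;

public class NamePatternDemo {
    public static void main(String[] args) {
        // Epoch millisecond 0 is Thursday, 1 January 1970, UTC.
        ZonedDateTime time = ZonedDateTime.ofInstant(Instant.ofEpochMilli(0L), ZoneId.of("UTC"));
        System.out.println(time.format(DateTimeFormatter.ofPattern("EEEE", Locale.ROOT))); // Thursday
        System.out.println(time.format(DateTimeFormatter.ofPattern("MMMM", Locale.ROOT))); // January
    }
}
```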
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MonthName.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MonthName.java new file mode 100644 index 0000000000000..170c80c10f91a --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MonthName.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NamedDateTimeProcessor.NameExtractor; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2; + +import java.util.TimeZone; + +/** + * Extract the month from a datetime in text format (January, February etc.) + */ +public class MonthName extends NamedDateTimeFunction { + protected static final String MONTH_NAME_FORMAT = "MMMM"; + + public MonthName(Location location, Expression field, TimeZone timeZone) { + super(location, field, timeZone); + } + + @Override + protected NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo() { + return MonthName::new; + } + + @Override + protected MonthName replaceChild(Expression newChild) { + return new MonthName(location(), newChild, timeZone()); + } + + @Override + protected String dateTimeFormat() { + return MONTH_NAME_FORMAT; + } + + @Override + public String extractName(long millis, String tzId) { + return nameExtractor().extract(millis, tzId); + } + + @Override + protected NameExtractor nameExtractor() { + return NameExtractor.MONTH_NAME; + } + +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MonthOfYear.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MonthOfYear.java index 3a2d51bee78ad..503a771611e7d 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MonthOfYear.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MonthOfYear.java @@ -22,7 +22,7 @@ public MonthOfYear(Location location, Expression field, TimeZone timeZone) { } @Override - protected NodeCtor2<Expression, TimeZone, DateTimeFunction> ctorForInfo() { + protected NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo() { return MonthOfYear::new; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeFunction.java new file mode 100644 index 0000000000000..c3e10981ce1fe --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeFunction.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NamedDateTimeProcessor.NameExtractor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinitions; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.UnaryProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.util.StringUtils; +import org.joda.time.DateTime; + +import java.util.Objects; +import java.util.TimeZone; + +import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder.paramsBuilder; +import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate.formatTemplate; + +/* + * Base class for "naming" date/time functions like month_name and day_name + */ +abstract class NamedDateTimeFunction extends BaseDateTimeFunction { + + NamedDateTimeFunction(Location location, Expression field, TimeZone timeZone) { + super(location, field, timeZone); + } + + @Override + public Object fold() { + DateTime folded = (DateTime) field().fold(); + if (folded == null) { + return null; + } + + return extractName(folded.getMillis(), timeZone().getID()); + } + + public abstract String extractName(long millis, String tzId); + + @Override + protected ScriptTemplate asScriptFrom(FieldAttribute field) { + ParamsBuilder params = paramsBuilder(); + + String template = null; + template = formatTemplate(formatMethodName("{sql}.{method_name}(doc[{}].value.millis, {})")); + params.variable(field.name()) + .variable(timeZone().getID()); + + return new ScriptTemplate(template, params.build(), dataType()); + } + + private String formatMethodName(String template) { + // the Painless method name will be the enum's lower camelcase name + return template.replace("{method_name}", StringUtils.underscoreToLowerCamelCase(nameExtractor().toString())); + } + + @Override + protected final ProcessorDefinition makeProcessorDefinition() { + return new UnaryProcessorDefinition(location(), this, ProcessorDefinitions.toProcessorDefinition(field()), + new NamedDateTimeProcessor(nameExtractor(), timeZone())); + } + + protected abstract NameExtractor nameExtractor(); + + protected abstract String dateTimeFormat(); + + @Override + public DataType dataType() { + return DataType.KEYWORD; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + NamedDateTimeFunction other = (NamedDateTimeFunction) obj; + return Objects.equals(other.field(), field()) + && Objects.equals(other.timeZone(), timeZone()); + } + + @Override + public int hashCode() { + return Objects.hash(field(), timeZone()); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessor.java new file mode 
100644 index 0000000000000..478ad8ee09f04 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessor.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.time.Instant; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.util.Locale; +import java.util.Objects; +import java.util.TimeZone; +import java.util.function.BiFunction; + +public class NamedDateTimeProcessor extends BaseDateTimeProcessor { + + public enum NameExtractor { + // for the moment we'll use no specific Locale, but we might consider introducing a Locale parameter, just like the timeZone one + DAY_NAME((Long millis, String tzId) -> { + ZonedDateTime time = ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), ZoneId.of(tzId)); + return time.format(DateTimeFormatter.ofPattern(DayName.DAY_NAME_FORMAT, Locale.ROOT)); + }), + MONTH_NAME((Long millis, String tzId) -> { + ZonedDateTime time = ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), ZoneId.of(tzId)); + return time.format(DateTimeFormatter.ofPattern(MonthName.MONTH_NAME_FORMAT, Locale.ROOT)); + }); + + private final BiFunction<Long, String, String> apply; + + NameExtractor(BiFunction<Long, String, String> apply) { + this.apply = apply; + } + + public final String extract(Long millis, String tzId) { + return apply.apply(millis, tzId); + } + } + + public static final String NAME = "ndt"; + + private final NameExtractor extractor; + + public NamedDateTimeProcessor(NameExtractor extractor, TimeZone timeZone) { + super(timeZone); + this.extractor = extractor; + } + + public NamedDateTimeProcessor(StreamInput in) throws IOException { + super(in); + extractor = in.readEnum(NameExtractor.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeEnum(extractor); + } + + @Override + public String getWriteableName() { + return NAME; + } + + NameExtractor extractor() { + return extractor; + } + + @Override + public Object doProcess(long millis) { + return extractor.extract(millis, timeZone().getID()); + } + + @Override + public int hashCode() { + return Objects.hash(extractor, timeZone()); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + NamedDateTimeProcessor other = (NamedDateTimeProcessor) obj; + return Objects.equals(extractor, other.extractor) + && Objects.equals(timeZone(), other.timeZone()); + } + + @Override + public String toString() { + return extractor.toString(); + } +}
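The processor above serializes the base-class state (the time zone) first and its own extractor enum second, and the StreamInput constructor reads them back in the same order. A hedged round-trip sketch in test style (the demo class itself is invented):

```java
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NamedDateTimeProcessor;
import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NamedDateTimeProcessor.NameExtractor;

import java.io.IOException;
import java.util.TimeZone;

public class RoundTripDemo {
    public static void main(String[] args) throws IOException {
        NamedDateTimeProcessor original =
                new NamedDateTimeProcessor(NameExtractor.DAY_NAME, TimeZone.getTimeZone("UTC"));
        BytesStreamOutput out = new BytesStreamOutput();
        original.writeTo(out);                    // writes the zone id, then the extractor enum
        NamedDateTimeProcessor read = new NamedDateTimeProcessor(out.bytes().streamInput());
        System.out.println(original.equals(read)); // true
    }
}
```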
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Quarter.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Quarter.java
new file mode 100644
index 0000000000000..22e368b0ec6ba
--- /dev/null
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Quarter.java
@@ -0,0 +1,94 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;
+
+import org.elasticsearch.xpack.sql.expression.Expression;
+import org.elasticsearch.xpack.sql.expression.FieldAttribute;
+import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition;
+import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinitions;
+import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.UnaryProcessorDefinition;
+import org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder;
+import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate;
+import org.elasticsearch.xpack.sql.tree.Location;
+import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2;
+import org.elasticsearch.xpack.sql.type.DataType;
+import org.joda.time.DateTime;
+
+import java.util.Objects;
+import java.util.TimeZone;
+
+import static org.elasticsearch.xpack.sql.expression.function.scalar.datetime.QuarterProcessor.quarter;
+import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder.paramsBuilder;
+import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate.formatTemplate;
+
+public class Quarter extends BaseDateTimeFunction {
+
+    protected static final String QUARTER_FORMAT = "q";
+
+    public Quarter(Location location, Expression field, TimeZone timeZone) {
+        super(location, field, timeZone);
+    }
+
+    @Override
+    public Object fold() {
+        DateTime folded = (DateTime) field().fold();
+        if (folded == null) {
+            return null;
+        }
+
+        return quarter(folded.getMillis(), timeZone().getID());
+    }
+
+    @Override
+    protected ScriptTemplate asScriptFrom(FieldAttribute field) {
+        ParamsBuilder params = paramsBuilder();
+
+        String template = null;
+        template = formatTemplate("{sql}.quarter(doc[{}].value.millis, {})");
+        params.variable(field.name())
+              .variable(timeZone().getID());
+
+        return new ScriptTemplate(template, params.build(), dataType());
+    }
+
+    @Override
+    protected NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo() {
+        return Quarter::new;
+    }
+
+    @Override
+    protected Quarter replaceChild(Expression newChild) {
+        return new Quarter(location(), newChild, timeZone());
+    }
+
+    @Override
+    protected ProcessorDefinition makeProcessorDefinition() {
+        return new UnaryProcessorDefinition(location(), this, ProcessorDefinitions.toProcessorDefinition(field()),
+                new QuarterProcessor(timeZone()));
+    }
+
+    @Override
+    public DataType dataType() {
+        return DataType.INTEGER;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj == null || obj.getClass() != getClass()) {
+            return false;
+        }
+        BaseDateTimeFunction other = (BaseDateTimeFunction) obj;
+        return Objects.equals(other.field(), field())
+                && Objects.equals(other.timeZone(), timeZone());
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(field(), timeZone());
+    }
+
+}
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessor.java
new file mode 100644
index 0000000000000..c6904216d0fec
--- /dev/null
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessor.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+
+import java.io.IOException;
+import java.time.Instant;
+import java.time.ZoneId;
+import java.time.ZonedDateTime;
+import java.time.format.DateTimeFormatter;
+import java.util.Locale;
+import java.util.Objects;
+import java.util.TimeZone;
+
+public class QuarterProcessor extends BaseDateTimeProcessor {
+
+    public QuarterProcessor(TimeZone timeZone) {
+        super(timeZone);
+    }
+
+    public QuarterProcessor(StreamInput in) throws IOException {
+        super(in);
+    }
+
+    public static final String NAME = "q";
+
+    @Override
+    public String getWriteableName() {
+        return NAME;
+    }
+
+    @Override
+    public Object doProcess(long millis) {
+        return quarter(millis, timeZone().getID());
+    }
+
+    public static Integer quarter(long millis, String tzId) {
+        ZonedDateTime time = ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), ZoneId.of(tzId));
+        return Integer.valueOf(time.format(DateTimeFormatter.ofPattern(Quarter.QUARTER_FORMAT, Locale.ROOT)));
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(timeZone());
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj == null || obj.getClass() != getClass()) {
+            return false;
+        }
+        QuarterProcessor other = (QuarterProcessor) obj;
+        return Objects.equals(timeZone(), other.timeZone());
+    }
+}
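The static quarter helper above is timezone sensitive, which the tests further below exercise. A minimal sketch (hypothetical demo class): epoch millisecond 0 falls in Q1 when read in UTC, but is still in Q4 of 1969 at GMT-10:00.

    import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.QuarterProcessor;

    public class QuarterDemo {
        public static void main(String[] args) {
            // 1970-01-01T00:00:00Z is Q1 in UTC...
            System.out.println(QuarterProcessor.quarter(0L, "UTC"));       // 1
            // ...but 1969-12-31T14:00 local time, i.e. Q4, at GMT-10:00.
            System.out.println(QuarterProcessor.quarter(0L, "GMT-10:00")); // 4
        }
    }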
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/SecondOfMinute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/SecondOfMinute.java
index 883502c017da5..3522eb10ffe80 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/SecondOfMinute.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/SecondOfMinute.java
@@ -22,7 +22,7 @@ public SecondOfMinute(Location location, Expression field, TimeZone timeZone) {
     }
 
     @Override
-    protected NodeCtor2<Expression, TimeZone, DateTimeFunction> ctorForInfo() {
+    protected NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo() {
         return SecondOfMinute::new;
     }
 
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/WeekOfYear.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/WeekOfYear.java
index eef2c48ad0f72..59948165f71cb 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/WeekOfYear.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/WeekOfYear.java
@@ -22,7 +22,7 @@ public WeekOfYear(Location location, Expression field, TimeZone timeZone) {
     }
 
     @Override
-    protected NodeCtor2<Expression, TimeZone, DateTimeFunction> ctorForInfo() {
+    protected NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo() {
         return WeekOfYear::new;
     }
 
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Year.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Year.java
index 28d475e4c7085..2b065329be305 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Year.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Year.java
@@ -22,7 +22,7 @@ public Year(Location location, Expression field, TimeZone timeZone) {
     }
 
     @Override
-    protected NodeCtor2<Expression, TimeZone, DateTimeFunction> ctorForInfo() {
+    protected NodeCtor2<Expression, TimeZone, BaseDateTimeFunction> ctorForInfo() {
         return Year::new;
     }
 
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/E.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/E.java
index 921b6edaef632..a3fdfa654dfa4 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/E.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/E.java
@@ -21,7 +21,7 @@ public class E extends MathFunction {
     private static final ScriptTemplate TEMPLATE = new ScriptTemplate("Math.E", Params.EMPTY, DataType.DOUBLE);
 
     public E(Location location) {
-        super(location, new Literal(location, Math.E, DataType.DOUBLE));
+        super(location, new Literal(location, "E", Math.E, DataType.DOUBLE));
     }
 
     @Override
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Pi.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Pi.java
index 9758843ee5d52..e57aa333f06c0 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Pi.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Pi.java
@@ -21,7 +21,7 @@ public class Pi extends MathFunction {
     private static final ScriptTemplate TEMPLATE = new ScriptTemplate("Math.PI", Params.EMPTY, DataType.DOUBLE);
 
     public Pi(Location location) {
-        super(location, new Literal(location, Math.PI, DataType.DOUBLE));
+        super(location, new Literal(location, "PI", Math.PI, DataType.DOUBLE));
     }
 
     @Override
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Replace.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Replace.java
index 9325986ac1f1c..3834b16ff1e78 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Replace.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/Replace.java
@@ -22,7 +22,7 @@
 import static java.lang.String.format;
 import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder.paramsBuilder;
 import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate.formatTemplate;
-import static org.elasticsearch.xpack.sql.expression.function.scalar.string.SubstringFunctionProcessor.doProcess;
+import static org.elasticsearch.xpack.sql.expression.function.scalar.string.ReplaceFunctionProcessor.doProcess;
 
 /**
  * Search the source string for occurrences of the pattern, and replace with the replacement string.
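The import fix above matters for constant folding: Replace previously folded through Substring's processor rather than its own. A hedged sketch of the intended semantics (the doProcess argument order is assumed from the class comment, not verified against the processor's actual signature):

    // Assuming ReplaceFunctionProcessor.doProcess(source, pattern, replacement),
    // folding REPLACE on constants should behave like SQL REPLACE:
    Object folded = ReplaceFunctionProcessor.doProcess("foobarbar", "bar", "*"); // "foo**" (expected)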
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java
index 12faeb78b662d..f0a79f15e36dd 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java
@@ -6,6 +6,8 @@
 package org.elasticsearch.xpack.sql.expression.function.scalar.whitelist;
 
 import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeFunction;
+import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NamedDateTimeProcessor.NameExtractor;
+import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.QuarterProcessor;
 import org.elasticsearch.xpack.sql.expression.function.scalar.string.BinaryStringNumericProcessor.BinaryStringNumericOperation;
 import org.elasticsearch.xpack.sql.expression.function.scalar.string.BinaryStringStringProcessor.BinaryStringStringOperation;
 import org.elasticsearch.xpack.sql.expression.function.scalar.string.ConcatFunctionProcessor;
@@ -28,6 +30,18 @@ public static Integer dateTimeChrono(long millis, String tzId, String chronoName) {
         return DateTimeFunction.dateTimeChrono(millis, tzId, chronoName);
     }
 
+    public static String dayName(long millis, String tzId) {
+        return NameExtractor.DAY_NAME.extract(millis, tzId);
+    }
+
+    public static String monthName(long millis, String tzId) {
+        return NameExtractor.MONTH_NAME.extract(millis, tzId);
+    }
+
+    public static Integer quarter(long millis, String tzId) {
+        return QuarterProcessor.quarter(millis, tzId);
+    }
+
     public static Integer ascii(String s) {
         return (Integer) StringOperation.ASCII.apply(s);
     }
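These statics are the runtime targets of the {sql}.dayName(...), {sql}.monthName(...) and {sql}.quarter(...) script templates above. A minimal sketch of calling them directly (hypothetical demo class; expected values for epoch millisecond 0 in UTC, matching the processor tests below):

    import org.elasticsearch.xpack.sql.expression.function.scalar.whitelist.InternalSqlScriptUtils;

    public class SqlScriptUtilsDemo {
        public static void main(String[] args) {
            System.out.println(InternalSqlScriptUtils.dayName(0L, "UTC"));   // Thursday
            System.out.println(InternalSqlScriptUtils.monthName(0L, "UTC")); // January
            System.out.println(InternalSqlScriptUtils.quarter(0L, "UTC"));   // 1
        }
    }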
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java
index 55c4112d38b6d..72105a2fae897 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java
@@ -1118,36 +1118,12 @@ static class ConstantFolding extends OptimizerExpressionRule {
 
         @Override
         protected Expression rule(Expression e) {
-            // handle aliases to avoid double aliasing of functions
-            // alias points to function which gets folded and wrapped in an alias that is
-            // aliases
             if (e instanceof Alias) {
                 Alias a = (Alias) e;
-                Expression fold = fold(a.child());
-                if (fold != a.child()) {
-                    return new Alias(a.location(), a.name(), null, fold, a.id());
-                }
-                return a;
-            }
-
-            Expression fold = fold(e);
-            if (fold != e) {
-                // preserve the name through an alias
-                if (e instanceof NamedExpression) {
-                    NamedExpression ne = (NamedExpression) e;
-                    return new Alias(e.location(), ne.name(), null, fold, ne.id());
-                }
-                return fold;
+                return a.child().foldable() ? Literal.of(a.name(), a.child()) : a;
             }
-            return e;
-        }
 
-        private Expression fold(Expression e) {
-            // literals are always foldable, so avoid creating a duplicate
-            if (e.foldable() && !(e instanceof Literal)) {
-                return new Literal(e.location(), e.fold(), e.dataType());
-            }
-            return e;
+            return e.foldable() ? Literal.of(e) : e;
         }
     }
@@ -1836,14 +1812,11 @@ protected LogicalPlan rule(LogicalPlan plan) {
         private List<Object> extractConstants(List<? extends NamedExpression> named) {
             List<Object> values = new ArrayList<>();
             for (NamedExpression n : named) {
-                if (n instanceof Alias) {
-                    Alias a = (Alias) n;
-                    if (a.child().foldable()) {
-                        values.add(a.child().fold());
-                    }
-                    else {
-                        return values;
-                    }
+                if (n.foldable()) {
+                    values.add(n.fold());
+                } else {
+                    // not everything is foldable, bail-out early
+                    return values;
                 }
             }
             return values;
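The net effect of the ConstantFolding rewrite: a foldable expression now folds straight to a named Literal instead of a Literal wrapped in an Alias. A minimal sketch in the spirit of testConstantFolding further below (assumes ConstantFolding is reachable from the caller's package, as it is from OptimizerTests):

    // 2 + 3 folds to a Literal with value 5; the name is preserved by
    // Literal.of rather than by an enclosing Alias, as it was before.
    public void testFoldAddToNamedLiteral() {
        Expression exp = new Add(EMPTY, L(2), L(3));
        Expression folded = new ConstantFolding().rule(exp);
        assertTrue(folded instanceof Literal);
        assertEquals(5, ((Literal) folded).value());
        assertEquals(Expressions.name(exp), Expressions.name(folded));
    }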
diff --git a/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt b/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt
index 8f86685889c55..0f12d32d44e8b 100644
--- a/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt
+++ b/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt
@@ -9,6 +9,9 @@ class org.elasticsearch.xpack.sql.expression.function.scalar.whitelist.InternalSqlScriptUtils {
 
   Integer dateTimeChrono(long, String, String)
+  String  dayName(long, String)
+  String  monthName(long, String)
+  Integer quarter(long, String)
   Integer ascii(String)
   Integer bitLength(String)
   String  character(Number)
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/LiteralTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/LiteralTests.java
index 8527c5b62dfae..d6bd6ab96b218 100644
--- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/LiteralTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/LiteralTests.java
@@ -61,7 +61,7 @@ protected Literal randomInstance() {
 
     @Override
     protected Literal copy(Literal instance) {
-        return new Literal(instance.location(), instance.value(), instance.dataType());
+        return new Literal(instance.location(), instance.name(), instance.value(), instance.dataType());
     }
 
     @Override
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/NamedExpressionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/NamedExpressionTests.java
index 79f0e970b1eba..3692e5e4752af 100644
--- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/NamedExpressionTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/NamedExpressionTests.java
@@ -6,6 +6,7 @@
 package org.elasticsearch.xpack.sql.expression.function;
 
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.sql.expression.FieldAttribute;
 import org.elasticsearch.xpack.sql.expression.Literal;
 import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Add;
 import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Div;
@@ -13,7 +14,10 @@
 import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Mul;
 import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Neg;
 import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Sub;
+import org.elasticsearch.xpack.sql.type.DataType;
+import org.elasticsearch.xpack.sql.type.EsField;
 
+import static java.util.Collections.emptyMap;
 import static org.elasticsearch.xpack.sql.tree.Location.EMPTY;
 
 public class NamedExpressionTests extends ESTestCase {
@@ -38,6 +42,12 @@ public void testArithmeticFunctionName() {
         assertEquals("-5", neg.name());
     }
 
+    public void testNameForArithmeticFunctionAppliedOnTableColumn() {
+        FieldAttribute fa = new FieldAttribute(EMPTY, "myField", new EsField("myESField", DataType.INTEGER, emptyMap(), true));
+        Add add = new Add(EMPTY, fa, l(10));
+        assertEquals("((myField) + 10)", add.name());
+    }
+
     private static Literal l(Object value) {
         return Literal.of(EMPTY, value);
     }
assertEquals("Tuesday", proc.process(new DateTime(10902, 8, 22, 11, 11, DateTimeZone.UTC))); + assertEquals("Monday", proc.process(new DateTime(10902, 8, 22, 9, 59, DateTimeZone.UTC))); + } + + public void testValidMonthNamesInUTC() { + NamedDateTimeProcessor proc = new NamedDateTimeProcessor(NameExtractor.MONTH_NAME, UTC); + assertEquals("January", proc.process("0")); + assertEquals("September", proc.process("-64164233612338")); + assertEquals("April", proc.process("64164233612338")); + + assertEquals("January", proc.process(new DateTime(0L, DateTimeZone.UTC))); + assertEquals("December", proc.process(new DateTime(-5400, 12, 25, 10, 10, DateTimeZone.UTC))); + assertEquals("February", proc.process(new DateTime(30, 2, 1, 12, 13, DateTimeZone.UTC))); + assertEquals("August", proc.process(new DateTime(10902, 8, 22, 11, 11, DateTimeZone.UTC))); + } + + public void testValidMonthNamesWithNonUTCTimeZone() { + NamedDateTimeProcessor proc = new NamedDateTimeProcessor(NameExtractor.MONTH_NAME, TimeZone.getTimeZone("GMT-3:00")); + assertEquals("December", proc.process("0")); + assertEquals("August", proc.process("-64165813612338")); // GMT: Tuesday, September 1, -0064 2:53:07.662 AM + assertEquals("April", proc.process("64164233612338")); // GMT: Monday, April 14, 4003 2:13:32.338 PM + + assertEquals("December", proc.process(new DateTime(0L, DateTimeZone.UTC))); + assertEquals("November", proc.process(new DateTime(-5400, 12, 1, 1, 1, DateTimeZone.UTC))); + assertEquals("February", proc.process(new DateTime(30, 2, 1, 12, 13, DateTimeZone.UTC))); + assertEquals("July", proc.process(new DateTime(10902, 8, 1, 2, 59, DateTimeZone.UTC))); + assertEquals("August", proc.process(new DateTime(10902, 8, 1, 3, 00, DateTimeZone.UTC))); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessorTests.java new file mode 100644 index 0000000000000..7747bb8cae4ed --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessorTests.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessorTests.java
new file mode 100644
index 0000000000000..7747bb8cae4ed
--- /dev/null
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessorTests.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;
+
+import org.elasticsearch.test.ESTestCase;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+
+import java.util.TimeZone;
+
+public class QuarterProcessorTests extends ESTestCase {
+
+    private static final TimeZone UTC = TimeZone.getTimeZone("UTC");
+
+    public void testQuarterWithUTCTimeZone() {
+        QuarterProcessor proc = new QuarterProcessor(UTC);
+
+        assertEquals(1, proc.process(new DateTime(0L, DateTimeZone.UTC)));
+        assertEquals(4, proc.process(new DateTime(-5400, 12, 25, 10, 10, DateTimeZone.UTC)));
+        assertEquals(1, proc.process(new DateTime(30, 2, 1, 12, 13, DateTimeZone.UTC)));
+        assertEquals(3, proc.process(new DateTime(10902, 8, 22, 11, 11, DateTimeZone.UTC)));
+
+        assertEquals(1, proc.process("0"));
+        assertEquals(3, proc.process("-64164233612338"));
+        assertEquals(2, proc.process("64164233612338"));
+    }
+
+    public void testQuarterWithNonUTCTimeZone() {
+        QuarterProcessor proc = new QuarterProcessor(TimeZone.getTimeZone("GMT-10:00"));
+        assertEquals(4, proc.process(new DateTime(0L, DateTimeZone.UTC)));
+        assertEquals(4, proc.process(new DateTime(-5400, 1, 1, 5, 0, DateTimeZone.UTC)));
+        assertEquals(1, proc.process(new DateTime(30, 4, 1, 9, 59, DateTimeZone.UTC)));
+
+        proc = new QuarterProcessor(TimeZone.getTimeZone("GMT+10:00"));
+        assertEquals(4, proc.process(new DateTime(10902, 9, 30, 14, 1, DateTimeZone.UTC)));
+        assertEquals(3, proc.process(new DateTime(10902, 9, 30, 13, 59, DateTimeZone.UTC)));
+
+        assertEquals(1, proc.process("0"));
+        assertEquals(3, proc.process("-64164233612338"));
+        assertEquals(2, proc.process("64164233612338"));
+    }
+}
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java
index ed4e54701dc0a..07349008c077d 100644
--- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java
@@ -85,6 +85,14 @@ public class OptimizerTests extends ESTestCase {
 
     private static final Expression DUMMY_EXPRESSION = new DummyBooleanExpression(EMPTY, 0);
 
+    private static final Literal ONE = L(1);
+    private static final Literal TWO = L(2);
+    private static final Literal THREE = L(3);
+    private static final Literal FOUR = L(4);
+    private static final Literal FIVE = L(5);
+    private static final Literal SIX = L(6);
+
+
     public static class DummyBooleanExpression extends Expression {
 
         private final int id;
@@ -161,7 +169,7 @@ public void testDuplicateFunctions() {
 
     public void testCombineProjections() {
         // a
-        Alias a = new Alias(EMPTY, "a", L(5));
+        Alias a = new Alias(EMPTY, "a", FIVE);
         // b
         Alias b = new Alias(EMPTY, "b", L(10));
         // x -> a
@@ -187,7 +195,7 @@ public void testReplaceFoldableAttributes() {
         // SELECT 5 a, 10 b FROM foo WHERE a < 10 ORDER BY b
 
         // a
-        Alias a = new Alias(EMPTY, "a", L(5));
+        Alias a = new Alias(EMPTY, "a", FIVE);
         // b
         Alias b = new Alias(EMPTY, "b", L(10));
         // WHERE a < 10
@@ -226,49 +234,44 @@ public void testReplaceFoldableAttributes() {
     //
 
     public void testConstantFolding() {
-        Expression exp = new Add(EMPTY, L(2), L(3));
+        Expression exp = new Add(EMPTY, TWO, THREE);
         assertTrue(exp.foldable());
         assertTrue(exp instanceof NamedExpression);
         String n = Expressions.name(exp);
         Expression result = new ConstantFolding().rule(exp);
-        assertTrue(result instanceof Alias);
+        assertTrue(result instanceof Literal);
         assertEquals(n, Expressions.name(result));
-        Expression c = ((Alias) result).child();
-        assertTrue(c instanceof Literal);
-        assertEquals(5, ((Literal) c).value());
+        assertEquals(5, ((Literal) result).value());
 
         // check now with an alias
         result = new ConstantFolding().rule(new Alias(EMPTY, "a", exp));
-        assertTrue(result instanceof Alias);
         assertEquals("a", Expressions.name(result));
-        c = ((Alias) result).child();
-        assertTrue(c instanceof Literal);
-        assertEquals(5, ((Literal) c).value());
+        assertEquals(5, ((Literal) result).value());
     }
 
     public void testConstantFoldingBinaryComparison() {
-        assertEquals(Literal.FALSE, new ConstantFolding().rule(new GreaterThan(EMPTY, L(2), L(3))));
-        assertEquals(Literal.FALSE, new ConstantFolding().rule(new GreaterThanOrEqual(EMPTY, L(2), L(3))));
-        assertEquals(Literal.FALSE, new ConstantFolding().rule(new Equals(EMPTY, L(2), L(3))));
-        assertEquals(Literal.TRUE, new ConstantFolding().rule(new LessThanOrEqual(EMPTY, L(2), L(3))));
-        assertEquals(Literal.TRUE, new ConstantFolding().rule(new LessThan(EMPTY, L(2), L(3))));
+        assertEquals(Literal.FALSE, new ConstantFolding().rule(new GreaterThan(EMPTY, TWO, THREE)));
+        assertEquals(Literal.FALSE, new ConstantFolding().rule(new GreaterThanOrEqual(EMPTY, TWO, THREE)));
+        assertEquals(Literal.FALSE, new ConstantFolding().rule(new Equals(EMPTY, TWO, THREE)));
+        assertEquals(Literal.TRUE, new ConstantFolding().rule(new LessThanOrEqual(EMPTY, TWO, THREE)));
+        assertEquals(Literal.TRUE, new ConstantFolding().rule(new LessThan(EMPTY, TWO, THREE)));
     }
 
     public void testConstantFoldingBinaryLogic() {
-        assertEquals(Literal.FALSE, new ConstantFolding().rule(new And(EMPTY, new GreaterThan(EMPTY, L(2), L(3)), Literal.TRUE)));
-        assertEquals(Literal.TRUE, new ConstantFolding().rule(new Or(EMPTY, new GreaterThanOrEqual(EMPTY, L(2), L(3)), Literal.TRUE)));
+        assertEquals(Literal.FALSE, new ConstantFolding().rule(new And(EMPTY, new GreaterThan(EMPTY, TWO, THREE), Literal.TRUE)));
+        assertEquals(Literal.TRUE, new ConstantFolding().rule(new Or(EMPTY, new GreaterThanOrEqual(EMPTY, TWO, THREE), Literal.TRUE)));
     }
 
     public void testConstantFoldingRange() {
-        assertEquals(Literal.TRUE, new ConstantFolding().rule(new Range(EMPTY, L(5), L(5), true, L(10), false)));
-        assertEquals(Literal.FALSE, new ConstantFolding().rule(new Range(EMPTY, L(5), L(5), false, L(10), false)));
+        assertEquals(Literal.TRUE, new ConstantFolding().rule(new Range(EMPTY, FIVE, FIVE, true, L(10), false)));
+        assertEquals(Literal.FALSE, new ConstantFolding().rule(new Range(EMPTY, FIVE, FIVE, false, L(10), false)));
     }
 
     public void testConstantIsNotNull() {
         assertEquals(Literal.FALSE, new ConstantFolding().rule(new IsNotNull(EMPTY, L(null))));
-        assertEquals(Literal.TRUE, new ConstantFolding().rule(new IsNotNull(EMPTY, L(5))));
+        assertEquals(Literal.TRUE, new ConstantFolding().rule(new IsNotNull(EMPTY, FIVE)));
     }
 
     public void testConstantNot() {
@@ -296,30 +299,24 @@ public void testConstantFoldingDatetime() {
     }
 
     public void testArithmeticFolding() {
-        assertEquals(10, foldFunction(new Add(EMPTY, L(7), L(3))));
-        assertEquals(4, foldFunction(new Sub(EMPTY, L(7), L(3))));
-        assertEquals(21, foldFunction(new Mul(EMPTY, L(7), L(3))));
-        assertEquals(2, foldFunction(new Div(EMPTY, L(7), L(3))));
-        assertEquals(1, foldFunction(new Mod(EMPTY, L(7), L(3))));
+        assertEquals(10, foldFunction(new Add(EMPTY, L(7), THREE)));
+        assertEquals(4, foldFunction(new Sub(EMPTY, L(7), THREE)));
+        assertEquals(21, foldFunction(new Mul(EMPTY, L(7), THREE)));
+        assertEquals(2, foldFunction(new Div(EMPTY, L(7), THREE)));
+        assertEquals(1, foldFunction(new Mod(EMPTY, L(7), THREE)));
     }
 
     public void testMathFolding() {
         assertEquals(7, foldFunction(new Abs(EMPTY, L(7))));
-        assertEquals(0d, (double) foldFunction(new ACos(EMPTY, L(1))), 0.01d);
-        assertEquals(1.57076d, (double) foldFunction(new ASin(EMPTY, L(1))), 0.01d);
-        assertEquals(0.78539d, (double) foldFunction(new ATan(EMPTY, L(1))), 0.01d);
+        assertEquals(0d, (double) foldFunction(new ACos(EMPTY, ONE)), 0.01d);
+        assertEquals(1.57076d, (double) foldFunction(new ASin(EMPTY, ONE)), 0.01d);
+        assertEquals(0.78539d, (double) foldFunction(new ATan(EMPTY, ONE)), 0.01d);
         assertEquals(7, foldFunction(new Floor(EMPTY, L(7))));
         assertEquals(Math.E, foldFunction(new E(EMPTY)));
     }
 
     private static Object foldFunction(Function f) {
-        return unwrapAlias(new ConstantFolding().rule(f));
-    }
-
-    private static Object unwrapAlias(Expression e) {
-        Alias a = (Alias) e;
-        Literal l = (Literal) a.child();
-        return l.value();
+        return ((Literal) new ConstantFolding().rule(f)).value();
     }
 
     //
@@ -327,21 +324,21 @@ private static Object unwrapAlias(Expression e) {
     //
 
     public void testBinaryComparisonSimplification() {
-        assertEquals(Literal.TRUE, new BinaryComparisonSimplification().rule(new Equals(EMPTY, L(5), L(5))));
-        assertEquals(Literal.TRUE, new BinaryComparisonSimplification().rule(new GreaterThanOrEqual(EMPTY, L(5), L(5))));
-        assertEquals(Literal.TRUE, new BinaryComparisonSimplification().rule(new LessThanOrEqual(EMPTY, L(5), L(5))));
+        assertEquals(Literal.TRUE, new BinaryComparisonSimplification().rule(new Equals(EMPTY, FIVE, FIVE)));
+        assertEquals(Literal.TRUE, new BinaryComparisonSimplification().rule(new GreaterThanOrEqual(EMPTY, FIVE, FIVE)));
+        assertEquals(Literal.TRUE, new BinaryComparisonSimplification().rule(new LessThanOrEqual(EMPTY, FIVE, FIVE)));
 
-        assertEquals(Literal.FALSE, new BinaryComparisonSimplification().rule(new GreaterThan(EMPTY, L(5), L(5))));
-        assertEquals(Literal.FALSE, new BinaryComparisonSimplification().rule(new LessThan(EMPTY, L(5), L(5))));
+        assertEquals(Literal.FALSE, new BinaryComparisonSimplification().rule(new GreaterThan(EMPTY, FIVE, FIVE)));
+        assertEquals(Literal.FALSE, new BinaryComparisonSimplification().rule(new LessThan(EMPTY, FIVE, FIVE)));
     }
 
     public void testLiteralsOnTheRight() {
         Alias a = new Alias(EMPTY, "a", L(10));
-        Expression result = new BooleanLiteralsOnTheRight().rule(new Equals(EMPTY, L(5), a));
+        Expression result = new BooleanLiteralsOnTheRight().rule(new Equals(EMPTY, FIVE, a));
         assertTrue(result instanceof Equals);
         Equals eq = (Equals) result;
         assertEquals(a, eq.left());
-        assertEquals(L(5), eq.right());
+        assertEquals(FIVE, eq.right());
     }
 
     public void testBoolSimplifyOr() {
@@ -390,7 +387,7 @@ public void testBoolCommonFactorExtraction() {
 
     public void testFoldExcludingRangeToFalse() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
-        Range r = new Range(EMPTY, fa, L(6), false, L(5), true);
+        Range r = new Range(EMPTY, fa, SIX, false, FIVE, true);
         assertTrue(r.foldable());
         assertEquals(Boolean.FALSE, r.fold());
     }
@@ -399,7 +396,7 @@ public void testFoldExcludingRangeToFalse() {
 
     public void testFoldExcludingRangeWithDifferentTypesToFalse() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
-        Range r = new Range(EMPTY, fa, L(6), false, L(5.5d), true);
+        Range r = new Range(EMPTY, fa, SIX, false, L(5.5d), true);
         assertTrue(r.foldable());
         assertEquals(Boolean.FALSE, r.fold());
     }
@@ -408,7 +405,7 @@ public void testFoldExcludingRangeWithDifferentTypesToFalse() {
 
     public void testCombineBinaryComparisonsNotComparable() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
-        LessThanOrEqual lte = new LessThanOrEqual(EMPTY, fa, L(6));
+        LessThanOrEqual lte = new LessThanOrEqual(EMPTY, fa, SIX);
         LessThan lt = new LessThan(EMPTY, fa, Literal.FALSE);
 
         CombineBinaryComparisons rule = new CombineBinaryComparisons();
@@ -420,71 +417,71 @@ public void testCombineBinaryComparisonsNotComparable() {
     // a <= 6 AND a < 5 -> a < 5
     public void testCombineBinaryComparisonsUpper() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
-        LessThanOrEqual lte = new LessThanOrEqual(EMPTY, fa, L(6));
-        LessThan lt = new LessThan(EMPTY, fa, L(5));
+        LessThanOrEqual lte = new LessThanOrEqual(EMPTY, fa, SIX);
+        LessThan lt = new LessThan(EMPTY, fa, FIVE);
 
         CombineBinaryComparisons rule = new CombineBinaryComparisons();
 
         Expression exp = rule.rule(new And(EMPTY, lte, lt));
         assertEquals(LessThan.class, exp.getClass());
         LessThan r = (LessThan) exp;
-        assertEquals(L(5), r.right());
+        assertEquals(FIVE, r.right());
    }
 
     // 6 <= a AND 5 < a -> 6 <= a
     public void testCombineBinaryComparisonsLower() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
-        GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, L(6));
-        GreaterThan gt = new GreaterThan(EMPTY, fa, L(5));
+        GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, SIX);
+        GreaterThan gt = new GreaterThan(EMPTY, fa, FIVE);
 
         CombineBinaryComparisons rule = new CombineBinaryComparisons();
 
         Expression exp = rule.rule(new And(EMPTY, gte, gt));
         assertEquals(GreaterThanOrEqual.class, exp.getClass());
         GreaterThanOrEqual r = (GreaterThanOrEqual) exp;
-        assertEquals(L(6), r.right());
+        assertEquals(SIX, r.right());
     }
 
     // 5 <= a AND 5 < a -> 5 < a
     public void testCombineBinaryComparisonsInclude() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
-        GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, L(5));
-        GreaterThan gt = new GreaterThan(EMPTY, fa, L(5));
+        GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, FIVE);
+        GreaterThan gt = new GreaterThan(EMPTY, fa, FIVE);
 
         CombineBinaryComparisons rule = new CombineBinaryComparisons();
 
         Expression exp = rule.rule(new And(EMPTY, gte, gt));
         assertEquals(GreaterThan.class, exp.getClass());
         GreaterThan r = (GreaterThan) exp;
-        assertEquals(L(5), r.right());
+        assertEquals(FIVE, r.right());
     }
 
     // 3 <= a AND 4 < a AND a <= 7 AND a < 6 -> 4 < a < 6
     public void testCombineMultipleBinaryComparisons() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
-        GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, L(3));
-        GreaterThan gt = new GreaterThan(EMPTY, fa, L(4));
+        GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, THREE);
+        GreaterThan gt = new GreaterThan(EMPTY, fa, FOUR);
         LessThanOrEqual lte = new LessThanOrEqual(EMPTY, fa, L(7));
-        LessThan lt = new LessThan(EMPTY, fa, L(6));
+        LessThan lt = new LessThan(EMPTY, fa, SIX);
 
         CombineBinaryComparisons rule = new CombineBinaryComparisons();
 
         Expression exp = rule.rule(new And(EMPTY, gte, new And(EMPTY, gt, new And(EMPTY, lt, lte))));
         assertEquals(Range.class, exp.getClass());
         Range r = (Range) exp;
-        assertEquals(L(4), r.lower());
+        assertEquals(FOUR, r.lower());
         assertFalse(r.includeLower());
-        assertEquals(L(6), r.upper());
+        assertEquals(SIX, r.upper());
         assertFalse(r.includeUpper());
     }
 
     // 3 <= a AND TRUE AND 4 < a AND a != 5 AND a <= 7 -> 4 < a <= 7 AND a != 5 AND TRUE
     public void testCombineMixedMultipleBinaryComparisons() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
-        GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, L(3));
-        GreaterThan gt = new GreaterThan(EMPTY, fa, L(4));
+        GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, THREE);
+        GreaterThan gt = new GreaterThan(EMPTY, fa, FOUR);
         LessThanOrEqual lte = new LessThanOrEqual(EMPTY, fa, L(7));
-        Expression ne = new Not(EMPTY, new Equals(EMPTY, fa, L(5)));
+        Expression ne = new Not(EMPTY, new Equals(EMPTY, fa, FIVE));
 
         CombineBinaryComparisons rule = new CombineBinaryComparisons();
 
@@ -494,7 +491,7 @@ public void testCombineMixedMultipleBinaryComparisons() {
         And and = ((And) exp);
         assertEquals(Range.class, and.right().getClass());
         Range r = (Range) and.right();
-        assertEquals(L(4), r.lower());
+        assertEquals(FOUR, r.lower());
         assertFalse(r.includeLower());
         assertEquals(L(7), r.upper());
         assertTrue(r.includeUpper());
@@ -503,17 +500,17 @@ public void testCombineMixedMultipleBinaryComparisons() {
     // 1 <= a AND a < 5 -> 1 <= a < 5
     public void testCombineComparisonsIntoRange() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
-        GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, L(1));
-        LessThan lt = new LessThan(EMPTY, fa, L(5));
+        GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, ONE);
+        LessThan lt = new LessThan(EMPTY, fa, FIVE);
 
         CombineBinaryComparisons rule = new CombineBinaryComparisons();
 
         Expression exp = rule.rule(new And(EMPTY, gte, lt));
         assertEquals(Range.class, rule.rule(exp).getClass());
 
         Range r = (Range) exp;
-        assertEquals(L(1), r.lower());
+        assertEquals(ONE, r.lower());
         assertTrue(r.includeLower());
-        assertEquals(L(5), r.upper());
+        assertEquals(FIVE, r.upper());
         assertFalse(r.includeUpper());
     }
 
@@ -521,10 +518,10 @@ public void testCombineComparisonsIntoRange() {
     public void testCombineUnbalancedComparisonsMixedWithEqualsIntoRange() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
         IsNotNull isn = new IsNotNull(EMPTY, fa);
-        GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, L(1));
+        GreaterThanOrEqual gte = new GreaterThanOrEqual(EMPTY, fa, ONE);
 
         Equals eq = new Equals(EMPTY, fa, L(10));
-        LessThan lt = new LessThan(EMPTY, fa, L(5));
+        LessThan lt = new LessThan(EMPTY, fa, FIVE);
 
         And and = new And(EMPTY, new And(EMPTY, isn, gte), new And(EMPTY, lt, eq));
 
@@ -535,9 +532,9 @@ public void testCombineUnbalancedComparisonsMixedWithEqualsIntoRange() {
 
         assertEquals(Range.class, a.right().getClass());
         Range r = (Range) a.right();
-        assertEquals(L(1), r.lower());
+        assertEquals(ONE, r.lower());
         assertTrue(r.includeLower());
-        assertEquals(L(5), r.upper());
+        assertEquals(FIVE, r.upper());
         assertFalse(r.includeUpper());
     }
 
@@ -545,8 +542,8 @@ public void testCombineUnbalancedComparisonsMixedWithEqualsIntoRange() {
     public void testCombineBinaryComparisonsConjunctionOfIncludedRange() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
 
-        Range r1 = new Range(EMPTY, fa, L(2), false, L(3), false);
-        Range r2 = new Range(EMPTY, fa, L(1), false, L(4), false);
+        Range r1 = new Range(EMPTY, fa, TWO, false, THREE, false);
+        Range r2 = new Range(EMPTY, fa, ONE, false, FOUR, false);
 
         And and = new And(EMPTY, r1, r2);
 
@@ -559,8 +556,8 @@ public void testCombineBinaryComparisonsConjunctionOfIncludedRange() {
     public void testCombineBinaryComparisonsConjunctionOfNonOverlappingBoundaries() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
 
-        Range r1 = new Range(EMPTY, fa, L(2), false, L(3), false);
-        Range r2 = new Range(EMPTY, fa, L(1), false, L(2), false);
+        Range r1 = new Range(EMPTY, fa, TWO, false, THREE, false);
+        Range r2 = new Range(EMPTY, fa, ONE, false, TWO, false);
 
         And and = new And(EMPTY, r1, r2);
 
@@ -568,9 +565,9 @@ public void testCombineBinaryComparisonsConjunctionOfNonOverlappingBoundaries() {
         Expression exp = rule.rule(and);
         assertEquals(Range.class, exp.getClass());
         Range r = (Range) exp;
-        assertEquals(L(2), r.lower());
+        assertEquals(TWO, r.lower());
         assertFalse(r.includeLower());
-        assertEquals(L(2), r.upper());
+        assertEquals(TWO, r.upper());
         assertFalse(r.includeUpper());
         assertEquals(Boolean.FALSE, r.fold());
     }
@@ -579,8 +576,8 @@ public void testCombineBinaryComparisonsConjunctionOfNonOverlappingBoundaries() {
     public void testCombineBinaryComparisonsConjunctionOfUpperEqualsOverlappingBoundaries() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
 
-        Range r1 = new Range(EMPTY, fa, L(2), false, L(3), false);
-        Range r2 = new Range(EMPTY, fa, L(2), false, L(3), true);
+        Range r1 = new Range(EMPTY, fa, TWO, false, THREE, false);
+        Range r2 = new Range(EMPTY, fa, TWO, false, THREE, true);
 
         And and = new And(EMPTY, r1, r2);
 
@@ -593,8 +590,8 @@ public void testCombineBinaryComparisonsConjunctionOfUpperEqualsOverlappingBoundaries() {
     public void testCombineBinaryComparisonsConjunctionOverlappingUpperBoundary() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
 
-        Range r2 = new Range(EMPTY, fa, L(2), false, L(3), false);
-        Range r1 = new Range(EMPTY, fa, L(1), false, L(3), false);
+        Range r2 = new Range(EMPTY, fa, TWO, false, THREE, false);
+        Range r1 = new Range(EMPTY, fa, ONE, false, THREE, false);
 
         And and = new And(EMPTY, r1, r2);
 
@@ -607,8 +604,8 @@ public void testCombineBinaryComparisonsConjunctionOverlappingUpperBoundary() {
     public void testCombineBinaryComparisonsConjunctionWithDifferentUpperLimitInclusion() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
 
-        Range r1 = new Range(EMPTY, fa, L(1), false, L(3), false);
-        Range r2 = new Range(EMPTY, fa, L(2), false, L(3), true);
+        Range r1 = new Range(EMPTY, fa, ONE, false, THREE, false);
+        Range r2 = new Range(EMPTY, fa, TWO, false, THREE, true);
 
         And and = new And(EMPTY, r1, r2);
 
@@ -616,9 +613,9 @@ public void testCombineBinaryComparisonsConjunctionWithDifferentUpperLimitInclusion() {
         Expression exp = rule.rule(and);
         assertEquals(Range.class, exp.getClass());
         Range r = (Range) exp;
-        assertEquals(L(2), r.lower());
+        assertEquals(TWO, r.lower());
         assertFalse(r.includeLower());
-        assertEquals(L(3), r.upper());
+        assertEquals(THREE, r.upper());
         assertFalse(r.includeUpper());
     }
 
@@ -626,8 +623,8 @@ public void testCombineBinaryComparisonsConjunctionWithDifferentUpperLimitInclusion() {
     public void testRangesOverlappingConjunctionNoLowerBoundary() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
 
-        Range r1 = new Range(EMPTY, fa, L(0), false, L(1), true);
-        Range r2 = new Range(EMPTY, fa, L(0), true, L(2), false);
+        Range r1 = new Range(EMPTY, fa, L(0), false, ONE, true);
+        Range r2 = new Range(EMPTY, fa, L(0), true, TWO, false);
 
         And and = new And(EMPTY, r1, r2);
 
@@ -641,7 +638,7 @@ public void testRangesOverlappingConjunctionNoLowerBoundary() {
 
     public void testCombineBinaryComparisonsDisjunctionNotComparable() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
-        GreaterThan gt1 = new GreaterThan(EMPTY, fa, L(1));
+        GreaterThan gt1 = new GreaterThan(EMPTY, fa, ONE);
         GreaterThan gt2 = new GreaterThan(EMPTY, fa, Literal.FALSE);
 
         Or or = new Or(EMPTY, gt1, gt2);
@@ -656,9 +653,9 @@ public void testCombineBinaryComparisonsDisjunctionNotComparable() {
     public void testCombineBinaryComparisonsDisjunctionLowerBound() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
 
-        GreaterThan gt1 = new GreaterThan(EMPTY, fa, L(1));
-        GreaterThan gt2 = new GreaterThan(EMPTY, fa, L(2));
-        GreaterThan gt3 = new GreaterThan(EMPTY, fa, L(3));
+        GreaterThan gt1 = new GreaterThan(EMPTY, fa, ONE);
+        GreaterThan gt2 = new GreaterThan(EMPTY, fa, TWO);
+        GreaterThan gt3 = new GreaterThan(EMPTY, fa, THREE);
 
         Or or = new Or(EMPTY, gt1, new Or(EMPTY, gt2, gt3));
 
@@ -667,16 +664,16 @@ public void testCombineBinaryComparisonsDisjunctionLowerBound() {
         assertEquals(GreaterThan.class, exp.getClass());
 
         GreaterThan gt = (GreaterThan) exp;
-        assertEquals(L(1), gt.right());
+        assertEquals(ONE, gt.right());
     }
 
     // 2 < a OR 1 < a OR 3 <= a -> 1 < a
     public void testCombineBinaryComparisonsDisjunctionIncludeLowerBounds() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
 
-        GreaterThan gt1 = new GreaterThan(EMPTY, fa, L(1));
-        GreaterThan gt2 = new GreaterThan(EMPTY, fa, L(2));
-        GreaterThanOrEqual gte3 = new GreaterThanOrEqual(EMPTY, fa, L(3));
+        GreaterThan gt1 = new GreaterThan(EMPTY, fa, ONE);
+        GreaterThan gt2 = new GreaterThan(EMPTY, fa, TWO);
+        GreaterThanOrEqual gte3 = new GreaterThanOrEqual(EMPTY, fa, THREE);
 
         Or or = new Or(EMPTY, new Or(EMPTY, gt1, gt2), gte3);
 
@@ -685,16 +682,16 @@ public void testCombineBinaryComparisonsDisjunctionIncludeLowerBounds() {
         assertEquals(GreaterThan.class, exp.getClass());
 
         GreaterThan gt = (GreaterThan) exp;
-        assertEquals(L(1), gt.right());
+        assertEquals(ONE, gt.right());
     }
 
     // a < 1 OR a < 2 OR a < 3 -> a < 3
     public void testCombineBinaryComparisonsDisjunctionUpperBound() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
 
-        LessThan lt1 = new LessThan(EMPTY, fa, L(1));
-        LessThan lt2 = new LessThan(EMPTY, fa, L(2));
-        LessThan lt3 = new LessThan(EMPTY, fa, L(3));
+        LessThan lt1 = new LessThan(EMPTY, fa, ONE);
+        LessThan lt2 = new LessThan(EMPTY, fa, TWO);
+        LessThan lt3 = new LessThan(EMPTY, fa, THREE);
 
         Or or = new Or(EMPTY, new Or(EMPTY, lt1, lt2), lt3);
 
@@ -703,16 +700,16 @@ public void testCombineBinaryComparisonsDisjunctionUpperBound() {
         assertEquals(LessThan.class, exp.getClass());
 
         LessThan lt = (LessThan) exp;
-        assertEquals(L(3), lt.right());
+        assertEquals(THREE, lt.right());
     }
 
     // a < 2 OR a <= 2 OR a < 1 -> a <= 2
     public void testCombineBinaryComparisonsDisjunctionIncludeUpperBounds() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
 
-        LessThan lt1 = new LessThan(EMPTY, fa, L(1));
-        LessThan lt2 = new LessThan(EMPTY, fa, L(2));
-        LessThanOrEqual lte2 = new LessThanOrEqual(EMPTY, fa, L(2));
+        LessThan lt1 = new LessThan(EMPTY, fa, ONE);
+        LessThan lt2 = new LessThan(EMPTY, fa, TWO);
+        LessThanOrEqual lte2 = new LessThanOrEqual(EMPTY, fa, TWO);
 
         Or or = new Or(EMPTY, lt2, new Or(EMPTY, lte2, lt1));
 
@@ -721,18 +718,18 @@ public void testCombineBinaryComparisonsDisjunctionIncludeUpperBounds() {
         assertEquals(LessThanOrEqual.class, exp.getClass());
 
         LessThanOrEqual lte = (LessThanOrEqual) exp;
-        assertEquals(L(2), lte.right());
+        assertEquals(TWO, lte.right());
     }
 
     // a < 2 OR 3 < a OR a < 1 OR 4 < a -> a < 2 OR 3 < a
     public void testCombineBinaryComparisonsDisjunctionOfLowerAndUpperBounds() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
 
-        LessThan lt1 = new LessThan(EMPTY, fa, L(1));
-        LessThan lt2 = new LessThan(EMPTY, fa, L(2));
+        LessThan lt1 = new LessThan(EMPTY, fa, ONE);
+        LessThan lt2 = new LessThan(EMPTY, fa, TWO);
 
-        GreaterThan gt3 = new GreaterThan(EMPTY, fa, L(3));
-        GreaterThan gt4 = new GreaterThan(EMPTY, fa, L(4));
+        GreaterThan gt3 = new GreaterThan(EMPTY, fa, THREE);
+        GreaterThan gt4 = new GreaterThan(EMPTY, fa, FOUR);
 
         Or or = new Or(EMPTY, new Or(EMPTY, lt2, gt3), new Or(EMPTY, lt1, gt4));
 
@@ -744,18 +741,18 @@ public void testCombineBinaryComparisonsDisjunctionOfLowerAndUpperBounds() {
 
         assertEquals(LessThan.class, ro.left().getClass());
         LessThan lt = (LessThan) ro.left();
-        assertEquals(L(2), lt.right());
+        assertEquals(TWO, lt.right());
         assertEquals(GreaterThan.class, ro.right().getClass());
         GreaterThan gt = (GreaterThan) ro.right();
-        assertEquals(L(3), gt.right());
+        assertEquals(THREE, gt.right());
     }
 
     // (2 < a < 3) OR (1 < a < 4) -> (1 < a < 4)
     public void testCombineBinaryComparisonsDisjunctionOfIncludedRangeNotComparable() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
 
-        Range r1 = new Range(EMPTY, fa, L(2), false, L(3), false);
-        Range r2 = new Range(EMPTY, fa, L(1), false, Literal.FALSE, false);
+        Range r1 = new Range(EMPTY, fa, TWO, false, THREE, false);
+        Range r2 = new Range(EMPTY, fa, ONE, false, Literal.FALSE, false);
 
         Or or = new Or(EMPTY, r1, r2);
 
@@ -769,8 +766,9 @@ public void testCombineBinaryComparisonsDisjunctionOfIncludedRangeNotComparable() {
     public void testCombineBinaryComparisonsDisjunctionOfIncludedRange() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
 
-        Range r1 = new Range(EMPTY, fa, L(2), false, L(3), false);
-        Range r2 = new Range(EMPTY, fa, L(1), false, L(4), false);
+
+        Range r1 = new Range(EMPTY, fa, TWO, false, THREE, false);
+        Range r2 = new Range(EMPTY, fa, ONE, false, FOUR, false);
 
         Or or = new Or(EMPTY, r1, r2);
 
@@ -779,9 +777,9 @@ public void testCombineBinaryComparisonsDisjunctionOfIncludedRange() {
         assertEquals(Range.class, exp.getClass());
 
         Range r = (Range) exp;
-        assertEquals(L(1), r.lower());
+        assertEquals(ONE, r.lower());
         assertFalse(r.includeLower());
-        assertEquals(L(4), r.upper());
+        assertEquals(FOUR, r.upper());
         assertFalse(r.includeUpper());
     }
 
@@ -789,8 +787,8 @@ public void testCombineBinaryComparisonsDisjunctionOfIncludedRange() {
     public void testCombineBinaryComparisonsDisjunctionOfNonOverlappingBoundaries() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
 
-        Range r1 = new Range(EMPTY, fa, L(2), false, L(3), false);
-        Range r2 = new Range(EMPTY, fa, L(1), false, L(2), false);
+        Range r1 = new Range(EMPTY, fa, TWO, false, THREE, false);
+        Range r2 = new Range(EMPTY, fa, ONE, false, TWO, false);
 
         Or or = new Or(EMPTY, r1, r2);
 
@@ -803,8 +801,8 @@ public void testCombineBinaryComparisonsDisjunctionOfNonOverlappingBoundaries() {
     public void testCombineBinaryComparisonsDisjunctionOfUpperEqualsOverlappingBoundaries() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
 
-        Range r1 = new Range(EMPTY, fa, L(2), false, L(3), false);
-        Range r2 = new Range(EMPTY, fa, L(2), false, L(3), true);
+        Range r1 = new Range(EMPTY, fa, TWO, false, THREE, false);
+        Range r2 = new Range(EMPTY, fa, TWO, false, THREE, true);
 
         Or or = new Or(EMPTY, r1, r2);
 
@@ -817,8 +815,8 @@ public void testCombineBinaryComparisonsDisjunctionOfUpperEqualsOverlappingBoundaries() {
     public void testCombineBinaryComparisonsOverlappingUpperBoundary() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
 
-        Range r2 = new Range(EMPTY, fa, L(2), false, L(3), false);
-        Range r1 = new Range(EMPTY, fa, L(1), false, L(3), false);
+        Range r2 = new Range(EMPTY, fa, TWO, false, THREE, false);
+        Range r1 = new Range(EMPTY, fa, ONE, false, THREE, false);
 
         Or or = new Or(EMPTY, r1, r2);
 
@@ -831,8 +829,8 @@ public void testCombineBinaryComparisonsOverlappingUpperBoundary() {
     public void testCombineBinaryComparisonsWithDifferentUpperLimitInclusion() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
 
-        Range r1 = new Range(EMPTY, fa, L(1), false, L(3), false);
-        Range r2 = new Range(EMPTY, fa, L(2), false, L(3), true);
+        Range r1 = new Range(EMPTY, fa, ONE, false, THREE, false);
+        Range r2 = new Range(EMPTY, fa, TWO, false, THREE, true);
 
         Or or = new Or(EMPTY, r1, r2);
 
@@ -845,8 +843,8 @@ public void testCombineBinaryComparisonsWithDifferentUpperLimitInclusion() {
     public void testRangesOverlappingNoLowerBoundary() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
 
-        Range r2 = new Range(EMPTY, fa, L(0), false, L(2), false);
-        Range r1 = new Range(EMPTY, fa, L(0), false, L(1), true);
+        Range r2 = new Range(EMPTY, fa, L(0), false, TWO, false);
+        Range r1 = new Range(EMPTY, fa, L(0), false, ONE, true);
 
         Or or = new Or(EMPTY, r1, r2);
 
@@ -860,8 +858,8 @@ public void testRangesOverlappingNoLowerBoundary() {
     // a == 1 AND a == 2 -> FALSE
     public void testDualEqualsConjunction() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
-        Equals eq1 = new Equals(EMPTY, fa, L(1));
-        Equals eq2 = new Equals(EMPTY, fa, L(2));
+        Equals eq1 = new Equals(EMPTY, fa, ONE);
+        Equals eq2 = new Equals(EMPTY, fa, TWO);
 
         PropagateEquals rule = new PropagateEquals();
         Expression exp = rule.rule(new And(EMPTY, eq1, eq2));
@@ -871,8 +869,8 @@ public void testDualEqualsConjunction() {
     // 1 <= a < 10 AND a == 1 -> a == 1
     public void testEliminateRangeByEqualsInInterval() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
-        Equals eq1 = new Equals(EMPTY, fa, L(1));
-        Range r = new Range(EMPTY, fa, L(1), true, L(10), false);
+        Equals eq1 = new Equals(EMPTY, fa, ONE);
+        Range r = new Range(EMPTY, fa, ONE, true, L(10), false);
 
         PropagateEquals rule = new PropagateEquals();
         Expression exp = rule.rule(new And(EMPTY, eq1, r));
@@ -883,7 +881,7 @@ public void testEliminateRangeByEqualsInInterval() {
     public void testEliminateRangeByEqualsOutsideInterval() {
         FieldAttribute fa = new FieldAttribute(EMPTY, "a", new EsField("af", DataType.INTEGER, emptyMap(), true));
         Equals eq1 = new Equals(EMPTY, fa, L(10));
-        Range r = new Range(EMPTY, fa, L(1), false, L(10), false);
+        Range r = new Range(EMPTY, fa, ONE, false, L(10), false);
 
         PropagateEquals rule = new PropagateEquals();
         Expression exp = rule.rule(new And(EMPTY, eq1, r));
diff --git a/x-pack/qa/full-cluster-restart/build.gradle b/x-pack/qa/full-cluster-restart/build.gradle
index ab8f9172b690c..c0fb7eb2b77d7 100644
--- a/x-pack/qa/full-cluster-restart/build.gradle
+++ b/x-pack/qa/full-cluster-restart/build.gradle
@@ -155,9 +155,6 @@ subprojects {
       // some tests rely on the translog not being flushed
       setting 'indices.memory.shard_inactive_time', '20m'
 
-      // debug logging for testRecovery see https://github.com/elastic/x-pack-elasticsearch/issues/2691
-      setting 'logger.level', 'DEBUG'
-
       setting 'xpack.security.enabled', 'true'
       setting 'xpack.security.transport.ssl.enabled', 'true'
       setting 'xpack.ssl.keystore.path', 'testnode.jks'
@@ -185,6 +182,7 @@ subprojects {
       systemProperty 'tests.old_cluster_version', version.toString().minus("-SNAPSHOT")
      systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo")
       exclude 'org/elasticsearch/upgrades/FullClusterRestartIT.class'
+      exclude 'org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.class'
       exclude 'org/elasticsearch/upgrades/QueryBuilderBWCIT.class'
     }
 
@@ -201,9 +199,6 @@ subprojects {
       setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser'
       waitCondition = waitWithAuth
 
-      // debug logging for testRecovery see https://github.com/elastic/x-pack-elasticsearch/issues/2691
-      setting 'logger.level', 'DEBUG'
-
       // some tests rely on the translog not being flushed
       setting 'indices.memory.shard_inactive_time', '20m'
 
       setting 'xpack.security.enabled', 'true'
@@ -224,6 +219,7 @@ subprojects {
       systemProperty 'tests.old_cluster_version', version.toString().minus("-SNAPSHOT")
       systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo")
       exclude 'org/elasticsearch/upgrades/FullClusterRestartIT.class'
+      exclude 'org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.class'
       exclude 'org/elasticsearch/upgrades/QueryBuilderBWCIT.class'
     }
 
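The FullClusterRestartIT hunks that follow drop per-class plumbing in favor of a shared base class. A hedged sketch of what AbstractFullClusterRestartTestCase presumably centralizes, inferred only from the members removed below (the actual class may differ):

    import org.elasticsearch.Version;
    import org.elasticsearch.common.Booleans;
    import org.elasticsearch.test.rest.ESRestTestCase;

    public abstract class AbstractFullClusterRestartTestCase extends ESRestTestCase {

        private final boolean runningAgainstOldCluster = Booleans.parseBoolean(System.getProperty("tests.is_old_cluster"));
        private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version"));

        public final boolean isRunningAgainstOldCluster() {
            return runningAgainstOldCluster;
        }

        public final Version getOldClusterVersion() {
            return oldClusterVersion;
        }

        // Assumed to host the preserve* overrides removed from the subclass,
        // so indices, snapshots, repos and templates survive the restart.
        @Override
        protected boolean preserveIndicesUponCompletion() {
            return true;
        }

        @Override
        protected boolean preserveSnapshotsUponCompletion() {
            return true;
        }

        @Override
        protected boolean preserveReposUponCompletion() {
            return true;
        }

        @Override
        protected boolean preserveTemplatesUponCompletion() {
            return true;
        }
    }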
org.elasticsearch.xpack.security.support.SecurityIndexManager; @@ -54,35 +54,13 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.startsWith; -public class FullClusterRestartIT extends ESRestTestCase { - private final boolean runningAgainstOldCluster = Booleans.parseBoolean(System.getProperty("tests.is_old_cluster")); - private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version")); +public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { @Before public void waitForMlTemplates() throws Exception { XPackRestTestHelper.waitForMlTemplates(client()); } - @Override - protected boolean preserveIndicesUponCompletion() { - return true; - } - - @Override - protected boolean preserveSnapshotsUponCompletion() { - return true; - } - - @Override - protected boolean preserveReposUponCompletion() { - return true; - } - - @Override - protected boolean preserveTemplatesUponCompletion() { - return true; - } - @Override protected Settings restClientSettings() { String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); @@ -103,7 +81,7 @@ public void testSingleDoc() throws IOException { String docLocation = "/testsingledoc/doc/1"; String doc = "{\"test\": \"test\"}"; - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { Request createDoc = new Request("PUT", docLocation); createDoc.addParameter("refresh", "true"); createDoc.setJsonEntity(doc); @@ -115,7 +93,7 @@ public void testSingleDoc() throws IOException { @SuppressWarnings("unchecked") public void testSecurityNativeRealm() throws Exception { - if (runningAgainstOldCluster) { + if (isRunningAgainstOldCluster()) { createUser("preupgrade_user"); createRole("preupgrade_role"); } else { @@ -165,15 +143,15 @@ public void testSecurityNativeRealm() throws Exception { assertUserInfo("preupgrade_user"); assertRoleInfo("preupgrade_role"); - if (!runningAgainstOldCluster) { + if (isRunningAgainstOldCluster() == false) { assertUserInfo("postupgrade_user"); assertRoleInfo("postupgrade_role"); } } public void testWatcher() throws Exception { - if (runningAgainstOldCluster) { - logger.info("Adding a watch on old cluster {}", oldClusterVersion); + if (isRunningAgainstOldCluster()) { + logger.info("Adding a watch on old cluster {}", getOldClusterVersion()); Request createBwcWatch = new Request("PUT", "_xpack/watcher/watch/bwc_watch"); createBwcWatch.setJsonEntity(loadWatch("simple-watch.json")); client().performRequest(createBwcWatch); @@ -194,7 +172,7 @@ public void testWatcher() throws Exception { waitForHits(".watcher-history*", 2); logger.info("Done creating watcher-related indices"); } else { - logger.info("testing against {}", oldClusterVersion); + logger.info("testing against {}", getOldClusterVersion()); waitForYellow(".watches,bwc_watch_index,.watcher-history*"); logger.info("checking if the upgrade procedure on the new cluster is required"); @@ -264,8 +242,8 @@ public void testWatcher() throws Exception { * Tests that a RollUp job created on an old cluster is correctly restarted after the upgrade.
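* (After the upgrade the persistent task is expected to come back in the "started" or "indexing" state; assertRollUpJob below verifies this.)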
*/ public void testRollupAfterRestart() throws Exception { - assumeTrue("Rollup can be tested with 6.3.0 and onwards", oldClusterVersion.onOrAfter(Version.V_6_3_0)); - if (runningAgainstOldCluster) { + assumeTrue("Rollup can be tested with 6.3.0 and onwards", getOldClusterVersion().onOrAfter(Version.V_6_3_0)); + if (isRunningAgainstOldCluster()) { final int numDocs = 59; final int year = randomIntBetween(1970, 2018); @@ -315,7 +293,7 @@ public void testRollupAfterRestart() throws Exception { final Request clusterHealthRequest = new Request("GET", "/_cluster/health"); clusterHealthRequest.addParameter("wait_for_status", "yellow"); clusterHealthRequest.addParameter("wait_for_no_relocating_shards", "true"); - if (oldClusterVersion.onOrAfter(Version.V_6_2_0)) { + if (getOldClusterVersion().onOrAfter(Version.V_6_2_0)) { clusterHealthRequest.addParameter("wait_for_no_initializing_shards", "true"); } Map clusterHealthResponse = entityAsMap(client().performRequest(clusterHealthRequest)); @@ -326,9 +304,9 @@ public void testRollupAfterRestart() throws Exception { } public void testRollupIDSchemeAfterRestart() throws Exception { - assumeTrue("Rollup can be tested with 6.3.0 and onwards", oldClusterVersion.onOrAfter(Version.V_6_3_0)); - assumeTrue("Rollup ID scheme changed in 6.4", oldClusterVersion.before(Version.V_6_4_0)); - if (runningAgainstOldCluster) { + assumeTrue("Rollup can be tested with 6.3.0 and onwards", getOldClusterVersion().onOrAfter(Version.V_6_3_0)); + assumeTrue("Rollup ID scheme changed in 6.4", getOldClusterVersion().before(Version.V_6_4_0)); + if (isRunningAgainstOldCluster()) { final Request indexRequest = new Request("POST", "/id-test-rollup/_doc/1"); indexRequest.setJsonEntity("{\"timestamp\":\"2018-01-01T00:00:01\",\"value\":123}"); @@ -439,8 +417,8 @@ public void testRollupIDSchemeAfterRestart() throws Exception { public void testSqlFailsOnIndexWithTwoTypes() throws IOException { // TODO this isn't going to trigger until we backport to 6.1 assumeTrue("It is only possible to build an index that sql doesn't like before 6.0.0", - oldClusterVersion.before(Version.V_6_0_0_alpha1)); - if (runningAgainstOldCluster) { + getOldClusterVersion().before(Version.V_6_0_0_alpha1)); + if (isRunningAgainstOldCluster()) { Request doc1 = new Request("POST", "/testsqlfailsonindexwithtwotypes/type1"); doc1.setJsonEntity("{}"); client().performRequest(doc1); @@ -550,7 +528,7 @@ private void waitForYellow(String indexName) throws IOException { request.addParameter("wait_for_status", "yellow"); request.addParameter("timeout", "30s"); request.addParameter("wait_for_no_relocating_shards", "true"); - if (oldClusterVersion.onOrAfter(Version.V_6_2_0)) { + if (getOldClusterVersion().onOrAfter(Version.V_6_2_0)) { request.addParameter("wait_for_no_initializing_shards", "true"); } Map response = entityAsMap(client().performRequest(request)); @@ -668,7 +646,7 @@ private void assertRollUpJob(final String rollupJob) throws Exception { // Persistent task state field has been renamed in 6.4.0 from "status" to "state" final String stateFieldName - = (runningAgainstOldCluster && oldClusterVersion.before(Version.V_6_4_0)) ? "status" : "state"; + = (isRunningAgainstOldCluster() && getOldClusterVersion().before(Version.V_6_4_0)) ? "status" : "state"; final String jobStateField = "task.xpack/rollup/job." 
+ stateFieldName + ".job_state"; assertThat("Expected field [" + jobStateField + "] to be started or indexing in " + task.get("id"), diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartSettingsUpgradeIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartSettingsUpgradeIT.java new file mode 100644 index 0000000000000..a679604a546fc --- /dev/null +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartSettingsUpgradeIT.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.restart; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; + +public class FullClusterRestartSettingsUpgradeIT extends org.elasticsearch.upgrades.FullClusterRestartSettingsUpgradeIT { + + @Override + protected Settings restClientSettings() { + final String token = + "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } + +} diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ShowTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ShowTestCase.java index f5b9381c54b31..601dca8abd417 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ShowTestCase.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ShowTestCase.java @@ -65,6 +65,8 @@ public void testShowFunctionsLikeInfix() throws IOException { assertThat(readLine(), RegexMatcher.matches("\\s*DAY_OF_YEAR\\s*\\|\\s*SCALAR\\s*")); assertThat(readLine(), RegexMatcher.matches("\\s*HOUR_OF_DAY\\s*\\|\\s*SCALAR\\s*")); assertThat(readLine(), RegexMatcher.matches("\\s*MINUTE_OF_DAY\\s*\\|\\s*SCALAR\\s*")); + assertThat(readLine(), RegexMatcher.matches("\\s*DAY_NAME\\s*\\|\\s*SCALAR\\s*")); + assertThat(readLine(), RegexMatcher.matches("\\s*DAYNAME\\s*\\|\\s*SCALAR\\s*")); assertEquals("", readLine()); } } diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvTestUtils.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvTestUtils.java index a5e8b549bce8f..856629f8d9188 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvTestUtils.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvTestUtils.java @@ -113,18 +113,18 @@ private static Tuple extractColumnTypesAndStripCli(String expect } private static Tuple extractColumnTypesFromHeader(String header) { - String[] columnTypes = Strings.delimitedListToStringArray(header, "|", " \t"); + String[] columnTypes = Strings.tokenizeToStringArray(header, "|"); StringBuilder types = new StringBuilder(); StringBuilder columns = new StringBuilder(); for (String column : columnTypes) { - String[] nameType = Strings.delimitedListToStringArray(column, ":"); + String[] nameType = Strings.delimitedListToStringArray(column.trim(), ":"); assertThat("If at least one column has a type associated with it, all columns should have types", nameType, arrayWithSize(2)); if (types.length() > 
0) { types.append(","); columns.append("|"); } - columns.append(nameType[0]); - types.append(resolveColumnType(nameType[1])); + columns.append(nameType[0].trim()); + types.append(resolveColumnType(nameType[1].trim())); } return new Tuple<>(columns.toString(), types.toString()); } @@ -206,4 +206,4 @@ public static class CsvTestCase { public String query; public String expectedResults; } -} \ No newline at end of file +} diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcAssert.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcAssert.java index 47f531ebd1f9b..133006c66a820 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcAssert.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcAssert.java @@ -176,8 +176,8 @@ private static void doAssertResultSetData(ResultSet expected, ResultSet actual, Object expectedObject = expected.getObject(column); Object actualObject = lenient ? actual.getObject(column, expectedColumnClass) : actual.getObject(column); - String msg = format(Locale.ROOT, "Different result for column [" + metaData.getColumnName(column) + "], " - + "entry [" + (count + 1) + "]"); + String msg = format(Locale.ROOT, "Different result for column [%s], entry [%d]", + metaData.getColumnName(column), count + 1); // handle nulls first if (expectedObject == null || actualObject == null) { @@ -230,4 +230,4 @@ private static int typeOf(int columnType, boolean lenient) { return columnType; } -} \ No newline at end of file +} diff --git a/x-pack/qa/sql/src/main/resources/command.csv-spec b/x-pack/qa/sql/src/main/resources/command.csv-spec index 77d397fa2b5be..28aadeded2cc1 100644 --- a/x-pack/qa/sql/src/main/resources/command.csv-spec +++ b/x-pack/qa/sql/src/main/resources/command.csv-spec @@ -38,6 +38,11 @@ MONTH |SCALAR YEAR |SCALAR WEEK_OF_YEAR |SCALAR WEEK |SCALAR +DAY_NAME |SCALAR +DAYNAME |SCALAR +MONTH_NAME |SCALAR +MONTHNAME |SCALAR +QUARTER |SCALAR ABS |SCALAR ACOS |SCALAR ASIN |SCALAR @@ -130,6 +135,8 @@ DAY_OF_WEEK |SCALAR DAY_OF_YEAR |SCALAR HOUR_OF_DAY |SCALAR MINUTE_OF_DAY |SCALAR +DAY_NAME |SCALAR +DAYNAME |SCALAR ; showTables diff --git a/x-pack/qa/sql/src/main/resources/datetime.sql-spec b/x-pack/qa/sql/src/main/resources/datetime.sql-spec index 20ea8329c8f4d..81012b7bebf92 100644 --- a/x-pack/qa/sql/src/main/resources/datetime.sql-spec +++ b/x-pack/qa/sql/src/main/resources/datetime.sql-spec @@ -12,34 +12,83 @@ dateTimeDay SELECT DAY(birth_date) d, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; + dateTimeDayOfMonth SELECT DAY_OF_MONTH(birth_date) d, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; + dateTimeMonth SELECT MONTH(birth_date) d, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; + dateTimeYear SELECT YEAR(birth_date) d, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +monthNameFromStringDate +SELECT MONTHNAME(CAST('2018-09-03' AS TIMESTAMP)) month FROM "test_emp" LIMIT 1; + +dayNameFromStringDate +SELECT DAYNAME(CAST('2018-09-03' AS TIMESTAMP)) day FROM "test_emp" LIMIT 1; + +quarterSelect +SELECT QUARTER(hire_date) q, hire_date FROM test_emp ORDER BY hire_date LIMIT 15; + // // Filter // + dateTimeFilterDayOfMonth SELECT DAY_OF_MONTH(birth_date) AS d, last_name l FROM "test_emp" WHERE DAY_OF_MONTH(birth_date) <= 10 ORDER BY emp_no LIMIT 5; + dateTimeFilterMonth SELECT MONTH(birth_date) AS d, last_name l FROM "test_emp" WHERE MONTH(birth_date) <= 5 ORDER BY emp_no LIMIT 5; +
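// MONTHNAME and DAYNAME return the full English name ('August', 'Sunday'), so the name-based filters below compare against complete names rather than abbreviations.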
dateTimeFilterYear SELECT YEAR(birth_date) AS d, last_name l FROM "test_emp" WHERE YEAR(birth_date) <= 1960 ORDER BY emp_no LIMIT 5; +monthNameFilterWithFirstLetter +SELECT MONTHNAME(hire_date) AS m, hire_date FROM "test_emp" WHERE LEFT(MONTHNAME(hire_date), 1) = 'J' ORDER BY hire_date LIMIT 10; + +monthNameFilterWithFullName +SELECT MONTHNAME(hire_date) AS m, hire_date FROM "test_emp" WHERE MONTHNAME(hire_date) = 'August' ORDER BY hire_date LIMIT 10; + +dayNameFilterWithFullName +SELECT DAYNAME(hire_date) AS d, hire_date FROM "test_emp" WHERE DAYNAME(hire_date) = 'Sunday' ORDER BY hire_date LIMIT 10; + +dayNameAndMonthNameAsFilter +SELECT first_name, last_name FROM "test_emp" WHERE DAYNAME(hire_date) = 'Sunday' AND MONTHNAME(hire_date) = 'January' ORDER BY hire_date LIMIT 10; + +quarterWithFilter +SELECT QUARTER(hire_date) quarter, hire_date FROM test_emp WHERE QUARTER(hire_date) > 2 ORDER BY hire_date LIMIT 15; // // Aggregate // - dateTimeAggByYear SELECT YEAR(birth_date) AS d, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY YEAR(birth_date) ORDER BY YEAR(birth_date) LIMIT 13; -dateTimeAggByMonth +dateTimeAggByMonthWithOrderBy SELECT MONTH(birth_date) AS d, COUNT(*) AS c, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY MONTH(birth_date) ORDER BY MONTH(birth_date) DESC; -dateTimeAggByDayOfMonth +dateTimeAggByDayOfMonthWithOrderBy SELECT DAY_OF_MONTH(birth_date) AS d, COUNT(*) AS c, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY DAY_OF_MONTH(birth_date) ORDER BY DAY_OF_MONTH(birth_date) DESC; + +monthNameWithGroupBy +SELECT MONTHNAME("hire_date") AS month, COUNT(*) AS count FROM "test_emp" GROUP BY MONTHNAME("hire_date"), MONTH("hire_date") ORDER BY MONTH("hire_date"); + +monthNameWithDoubleGroupByAndOrderBy +SELECT MONTHNAME("hire_date") AS month, COUNT(*) AS count FROM "test_emp" GROUP BY MONTHNAME("hire_date"), MONTH("hire_date") ORDER BY MONTHNAME("hire_date") DESC; + +// AwaitsFix https://github.com/elastic/elasticsearch/issues/33519 +// monthNameWithGroupByOrderByAndHaving +// SELECT CAST(MAX("salary") AS DOUBLE) max_salary, MONTHNAME("hire_date") month_name FROM "test_emp" GROUP BY MONTHNAME("hire_date") HAVING MAX("salary") > 50000 ORDER BY MONTHNAME(hire_date); +// dayNameWithHaving +// SELECT DAYNAME("hire_date") FROM "test_emp" GROUP BY DAYNAME("hire_date") HAVING MAX("emp_no") > ASCII(DAYNAME("hire_date")); + +dayNameWithDoubleGroupByAndOrderBy +SELECT COUNT(*) c, DAYNAME(hire_date) day_name, DAY(hire_date) day FROM test_emp WHERE MONTHNAME(hire_date) = 'August' GROUP BY DAYNAME(hire_date), DAY(hire_date) ORDER BY DAYNAME(hire_date), DAY(hire_date); + +dayNameWithGroupByOrderByAndHaving +SELECT CAST(MAX(salary) AS DOUBLE) max_salary, DAYNAME(hire_date) day_name FROM test_emp GROUP BY DAYNAME(hire_date) HAVING MAX(salary) > 50000 ORDER BY DAYNAME("hire_date"); + +quarterWithGroupByAndOrderBy +SELECT QUARTER(hire_date) quarter, COUNT(*) hires FROM test_emp GROUP BY QUARTER(hire_date) ORDER BY QUARTER(hire_date); diff --git a/x-pack/qa/sql/src/main/resources/docs.csv-spec b/x-pack/qa/sql/src/main/resources/docs.csv-spec index 2a4f29fcf5d9a..52356bdfd52eb 100644 --- a/x-pack/qa/sql/src/main/resources/docs.csv-spec +++ b/x-pack/qa/sql/src/main/resources/docs.csv-spec @@ -214,6 +214,11 @@ MONTH |SCALAR YEAR |SCALAR WEEK_OF_YEAR |SCALAR WEEK |SCALAR +DAY_NAME |SCALAR +DAYNAME |SCALAR +MONTH_NAME |SCALAR +MONTHNAME |SCALAR +QUARTER |SCALAR ABS |SCALAR ACOS |SCALAR ASIN |SCALAR @@ -318,7 +323,9 @@ DAY |SCALAR DAY_OF_WEEK |SCALAR
DAY_OF_YEAR |SCALAR HOUR_OF_DAY |SCALAR -MINUTE_OF_DAY |SCALAR +MINUTE_OF_DAY |SCALAR +DAY_NAME |SCALAR +DAYNAME |SCALAR // end::showFunctionsWithPattern ; diff --git a/x-pack/qa/sql/src/main/resources/functions.csv-spec b/x-pack/qa/sql/src/main/resources/functions.csv-spec index 1a610aec04861..3622cfe043381 100644 --- a/x-pack/qa/sql/src/main/resources/functions.csv-spec +++ b/x-pack/qa/sql/src/main/resources/functions.csv-spec @@ -407,3 +407,26 @@ SELECT CONCAT(CONCAT(SUBSTRING("first_name",1,LENGTH("first_name")-2),UCASE(LEFT ---------------+--------------------------------------------- AlejandRo |2 ; + + +checkColumnNameWithNestedArithmeticFunctionCallsOnTableColumn +SELECT CHAR(emp_no % 10000) FROM "test_emp" WHERE emp_no > 10064 ORDER BY emp_no LIMIT 1; + +CHAR(((emp_no) % 10000)):s +A +; + +checkColumnNameWithComplexNestedArithmeticFunctionCallsOnTableColumn1 +SELECT CHAR(emp_no % (7000 + 3000)) FROM "test_emp" WHERE emp_no > 10065 ORDER BY emp_no LIMIT 1; + +CHAR(((emp_no) % ((7000 + 3000)))):s +B +; + + +checkColumnNameWithComplexNestedArithmeticFunctionCallsOnTableColumn2 +SELECT CHAR((emp_no % (emp_no - 1 + 1)) + 67) FROM "test_emp" WHERE emp_no > 10066 ORDER BY emp_no LIMIT 1; + +CHAR(((((emp_no) % (((((emp_no) - 1)) + 1)))) + 67)):s +C +; diff --git a/x-pack/qa/sql/src/main/resources/math.sql-spec b/x-pack/qa/sql/src/main/resources/math.sql-spec index e38de2aa6bcbf..6452d2a3ac0a6 100644 --- a/x-pack/qa/sql/src/main/resources/math.sql-spec +++ b/x-pack/qa/sql/src/main/resources/math.sql-spec @@ -128,7 +128,9 @@ mathATan2 // tag::atan2 SELECT ATAN2(emp_no, emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; // end::atan2 -mathPower // tag::power +mathPowerPositive SELECT POWER(emp_no, 2) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +mathPowerNegative +SELECT POWER(salary, -1) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; // end::power diff --git a/x-pack/qa/sql/src/main/resources/string-functions.sql-spec b/x-pack/qa/sql/src/main/resources/string-functions.sql-spec index 15bb6dea935c8..c0b0430b27897 100644 --- a/x-pack/qa/sql/src/main/resources/string-functions.sql-spec +++ b/x-pack/qa/sql/src/main/resources/string-functions.sql-spec @@ -1,5 +1,6 @@ stringAscii SELECT ASCII(first_name) s FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; + stringChar SELECT CHAR(emp_no % 10000) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; @@ -9,6 +10,9 @@ SELECT emp_no, ASCII(first_name) a FROM "test_emp" WHERE ASCII(first_name) < 100 stringAsciiEqualsConstant SELECT emp_no, ASCII(first_name) a, first_name name FROM "test_emp" WHERE ASCII(first_name) = 65 ORDER BY emp_no; +stringAsciiInline +SELECT ASCII('E') e; + //https://github.com/elastic/elasticsearch/issues/31863 //stringSelectConstantAsciiEqualsConstant //SELECT ASCII('A') = 65 a FROM "test_emp" WHERE ASCII('A') = 65 ORDER BY emp_no; @@ -16,12 +20,105 @@ SELECT emp_no, ASCII(first_name) a, first_name name FROM "test_emp" WHERE ASCII( stringCharFilter SELECT emp_no, CHAR(emp_no % 10000) m FROM "test_emp" WHERE CHAR(emp_no % 10000) = 'A'; +stringSelectCharInline1 +SELECT CHAR(250) c; + +stringSelectCharInline2 +SELECT CHAR(2) c; + +charLengthInline1 +SELECT CAST(CHAR_LENGTH('Elasticsearch') AS INT) charlength; + +charLengthInline2 +SELECT CAST(CHAR_LENGTH(' Elasticsearch ') AS INT) charlength; + +charLengthInline3 +SELECT CAST(CHAR_LENGTH('') AS INT) charlength; + +concatInline1 +SELECT CONCAT('Elastic','search') concat; + +concatInline2 
+SELECT CONCAT(CONCAT('Lucene And ', 'Elastic'),'search') concat; + +concatInline3 +SELECT CONCAT(CONCAT('Lucene And ', 'Elastic'),CONCAT('search','')) concat; + lcaseFilter SELECT LCASE(first_name) lc, CHAR(ASCII(LCASE(first_name))) chr FROM "test_emp" WHERE CHAR(ASCII(LCASE(first_name))) = 'a'; +lcaseInline1 +SELECT LCASE('') L; + +lcaseInline2 +SELECT LCASE('ElAsTiC fantastic') lower; + +leftInline1 +SELECT LEFT('Elasticsearch', 7) leftchars; + +leftInline2 +SELECT LEFT('Elasticsearch', 1) leftchars; + +leftInline3 +SELECT LEFT('Elasticsearch', 25) leftchars; + +leftInline4 +SELECT LEFT('Elasticsearch', LENGTH('abcdefghijklmnop')) leftchars; + ltrimFilter SELECT LTRIM(first_name) lt FROM "test_emp" WHERE LTRIM(first_name) = 'Bob'; +ltrimInline1 +SELECT LTRIM(' Elastic ') trimmed; + +ltrimInline2 +SELECT LTRIM(' ') trimmed; + +locateInline1 +SELECT LOCATE('a', 'Elasticsearch', 8) location; + +locateInline2 +SELECT LOCATE('a', 'Elasticsearch') location; + +locateInline3 +SELECT LOCATE('x', 'Elasticsearch') location; + +insertInline1 +SELECT INSERT('Insert [here] your comment!', 8, 6, '(random thoughts about Elasticsearch)') ins; + +insertInline2 +SELECT INSERT('Insert [here] your comment!', 8, 20, '(random thoughts about Elasticsearch)') ins; + +insertInline3 +SELECT INSERT('Insert [here] your comment!', 8, 19, '(random thoughts about Elasticsearch)') ins; + +positionInline1 +SELECT POSITION('a','Elasticsearch') pos; + +positionInline2 +SELECT POSITION('x','Elasticsearch') pos; + +repeatInline1 +SELECT REPEAT('Elastic',2) rep; + +repeatInline2 +SELECT REPEAT('Elastic',1) rep; + +replaceInline1 +SELECT REPLACE('Elasticsearch','sea','A') repl; + +replaceInline2 +SELECT REPLACE('Elasticsearch','x','A') repl; + +rightInline1 +SELECT RIGHT('Elasticsearch', LENGTH('Search')) rightchars; + +rightInline2 +SELECT RIGHT(CONCAT('Elastic','search'), LENGTH('Search')) rightchars; + +rightInline3 +SELECT RIGHT('Elasticsearch', 0) rightchars; + // Unsupported yet // Functions combined with 'LIKE' should perform the match inside a Painless script, whereas at the moment it's handled as a regular `match` query in ES. //ltrimFilterWithLike @@ -30,15 +127,45 @@ SELECT LTRIM(first_name) lt FROM "test_emp" WHERE LTRIM(first_name) = 'Bob'; rtrimFilter SELECT RTRIM(first_name) rt FROM "test_emp" WHERE RTRIM(first_name) = 'Johnny'; +rtrimInline1 +SELECT RTRIM(' Elastic ') trimmed; + +rtrimInline2 +SELECT RTRIM(' ') trimmed; + spaceFilter SELECT SPACE(languages) spaces, languages FROM "test_emp" WHERE SPACE(languages) = ' '; spaceFilterWithLengthFunctions SELECT SPACE(languages) spaces, languages, first_name FROM "test_emp" WHERE CHAR_LENGTH(SPACE(languages)) = 3 ORDER BY first_name; +spaceInline1 +SELECT SPACE(5) space; + +spaceInline2 +SELECT SPACE(0) space; + +substringInline1 +SELECT SUBSTRING('Elasticsearch', 1, 7) sub; + +substringInline2 +SELECT SUBSTRING('Elasticsearch', 1, 15) sub; + +substringInline3 +SELECT SUBSTRING('Elasticsearch', 10, 10) sub; + ucaseFilter SELECT UCASE(gender) uppercased, COUNT(*) count FROM "test_emp" WHERE UCASE(gender) = 'F' GROUP BY UCASE(gender); +ucaseInline1 +SELECT UCASE('ElAsTiC') upper; + +ucaseInline2 +SELECT UCASE('') upper; + +ucaseInline3 +SELECT UCASE(' elastic ') upper; + // // Group and order by //
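For readers who want to try the new DAYNAME/MONTHNAME/QUARTER scalars outside the spec files, the sketch below exercises them over the SQL JDBC driver. It is illustrative only and not part of this patch: the endpoint URL, the absence of security settings, and the driver being on the classpath are all assumptions. The function semantics match the spec tests above (full English names, quarters 1-4), and FROM-less SELECTs are supported, as the new inline string tests show; 2018-09-03 was a Monday in the third quarter.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class DateNameFunctionsExample {
        public static void main(String[] args) throws Exception {
            // Hypothetical local endpoint; the SQL JDBC driver uses "jdbc:es://host:port" URLs.
            String url = "jdbc:es://localhost:9200";
            try (Connection con = DriverManager.getConnection(url);
                 Statement st = con.createStatement();
                 ResultSet rs = st.executeQuery(
                         "SELECT DAYNAME(CAST('2018-09-03' AS TIMESTAMP)) AS d, "
                                 + "MONTHNAME(CAST('2018-09-03' AS TIMESTAMP)) AS m, "
                                 + "QUARTER(CAST('2018-09-03' AS TIMESTAMP)) AS q")) {
                while (rs.next()) {
                    // Expected: "Monday September 3" -- full names, quarter as an integer.
                    System.out.println(rs.getString("d") + " " + rs.getString("m") + " " + rs.getInt("q"));
                }
            }
        }
    }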